[GNA] Adding missed ifdef's (openvinotoolkit#7167)

* Added missing ifdefs

* Moved ifdefs to avoid unused variables and unclosed scopes

* Moved ifdefs to lower line numbers

andreysapozhn authored Oct 6, 2021
1 parent 11516bb commit 9add27f
Showing 1 changed file with 33 additions and 31 deletions.
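
For context before the diff: WriteDnnText declares weight/bias pointers and loop bounds unconditionally, but their only readers sit behind the DUMP_WB and LIGHT_DUMP dump macros, so builds with dumping disabled emit unused-variable warnings. The sketch below is a minimal illustration of that warning pattern and of the fix this commit applies (move the declaration under the same guard as its use, widening the guard to cover both dump modes where both read it); the function and variable names are illustrative, not lines from the patch.

    #include <cstdint>
    #include <iostream>

    void dump_weights_before(const int16_t* weights, uint32_t rows) {
        // Before: the pointer is declared unconditionally, but its only
        // use is guarded, so a build without DUMP_WB warns that
        // ptr_weight is unused.
        const int16_t* ptr_weight = weights;
    #ifdef DUMP_WB
        for (uint32_t row = 0; row < rows; row++) {
            std::cout << ptr_weight[row] << "\n";
        }
    #endif
    }

    void dump_weights_after(const int16_t* weights, uint32_t rows) {
        // After: declaration and use live under one guard, widened so that
        // every configuration that reads the pointer still compiles it in.
    #if defined(DUMP_WB) || defined(LIGHT_DUMP)
        const int16_t* ptr_weight = weights;
        for (uint32_t row = 0; row < rows; row++) {
            std::cout << ptr_weight[row] << "\n";
        }
    #endif
    }
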
inference-engine/src/gna_plugin/backend/am_intel_dnn.cpp
@@ -801,8 +801,10 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
uint32_t num_bytes_per_bias = component[i].op.affine.num_bytes_per_bias;
float weight_scale_factor = component[i].op.affine.weight_scale_factor;
float output_scale_factor = component[i].output_scale_factor;
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
uint32_t num_weight_rows = (component[i].operation == kDnnDiagonalOp) ? 1 : num_rows_out;
uint32_t num_weight_columns = num_rows_in;
#endif
if ((compute_precision_ == kDnnInt) && (logging_precision == kDnnFloat)) {
out_file << "<num_bytes_per_weight> " << std::dec << 4 << "\n";
out_file << "<num_bytes_per_bias> " << std::dec << 4 << "\n";
@@ -823,15 +825,15 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
<< GNAPluginNS::memory::MemoryOffset(component[i].op.affine.ptr_weights, ptr_dnn_memory_) << "\n";
out_file << "<bias_address> " << "0x" << std::setfill('0') << std::setw(8) << std::hex
<< GNAPluginNS::memory::MemoryOffset(component[i].op.affine.ptr_biases, ptr_dnn_memory_) << "\n";

#ifdef LIGHT_DUMP
std::ofstream out_wfile((out_file_name.str() + "_weights.txt").c_str(), std::ios::out);
std::ofstream out_bfile((out_file_name.str() + "_biases.txt").c_str(), std::ios::out);

#endif
if (num_bytes_per_weight == 1) {
if (num_bytes_per_bias != 1) {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
int8_t* ptr_weight = reinterpret_cast<int8_t*>(component[i].op.affine.ptr_weights);
gna_compound_bias_t* ptr_bias = reinterpret_cast<gna_compound_bias_t*>(component[i].op.affine.ptr_biases);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_weight_rows; row++) {
for (uint32_t col = 0; col < num_weight_columns; col++) {
if (logging_precision == kDnnFloat) {
@@ -847,8 +849,8 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
}
#endif
} else {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
int8_t* ptr_weight = reinterpret_cast<int8_t*>(component[i].op.affine.ptr_weights);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_weight_rows; row++) {
for (uint32_t col = 0; col < num_weight_columns; col++) {
if (logging_precision == kDnnFloat) {
@@ -861,11 +863,11 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
out_wfile << "\n";
}
}
}
#endif
}
} else if (num_bytes_per_weight == 2) {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
int16_t *ptr_weight = reinterpret_cast<int16_t *>(component[i].op.affine.ptr_weights);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_weight_rows; row++) {
for (uint32_t col = 0; col < num_weight_columns; col++) {
if (logging_precision == kDnnFloat) {
@@ -879,8 +881,8 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
}
#endif
} else if (compute_precision_ == kDnnFloat) {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
float *ptr_weight = reinterpret_cast<float *>(component[i].op.affine.ptr_weights);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_weight_rows; row++) {
for (uint32_t col = 0; col < num_weight_columns; col++) {
out_wfile << std::setprecision(5)
@@ -896,9 +898,9 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
if (compute_precision_ == kDnnInt) {
if (num_bytes_per_weight == 1) {
if (num_bytes_per_bias != 1) {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
gna_compound_bias_t
* ptr_biases = reinterpret_cast<gna_compound_bias_t*>(component[i].op.affine.ptr_biases);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_rows_out; row++) {
if (logging_precision == kDnnInt) {
out_bfile << std::setw(8) << ptr_biases[row].bias << ", ";
@@ -909,8 +911,8 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
}
#endif
} else {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
int8_t *ptr_biases = reinterpret_cast<int8_t*>(component[i].op.affine.ptr_biases);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_rows_out; row++) {
if (logging_precision == kDnnInt) {
out_bfile << std::setw(8) << ptr_biases[row] << "\n";
@@ -921,8 +923,8 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
#endif
}
} else {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
int32_t *ptr_biases = reinterpret_cast<int32_t *>(component[i].op.affine.ptr_biases);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_rows_out; row++) {
if (logging_precision == kDnnInt) {
out_bfile << std::setw(8) << ptr_biases[row] << "\n";
@@ -932,11 +934,9 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
}
#endif
}

} else {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
float *ptr_biases = reinterpret_cast<float *>(component[i].op.affine.ptr_biases);
#ifdef DUMP_WB

for (uint32_t row = 0; row < num_rows_out; row++) {
out_bfile << std::setprecision(5) << ptr_biases[row] << "\n";
}
@@ -976,15 +976,15 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
out_file << "<bias_address> " << "0x" << std::setfill('0') << std::setw(8) << std::hex
<< GNAPluginNS::memory::MemoryOffset(component[i].op.conv1D.ptr_biases, ptr_dnn_memory_) << "\n";


#ifdef LIGHT_DUMP
std::ofstream out_wfile((out_file_name.str() + "_weights.txt").c_str(), std::ios::out);
std::ofstream out_bfile((out_file_name.str() + "_biases.txt").c_str(), std::ios::out);

#endif

if (num_bytes_per_weight == 1) {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
int8_t *ptr_weight = reinterpret_cast<int8_t *>(component[i].op.conv1D.ptr_filters);
gna_compound_bias_t *ptr_bias = reinterpret_cast<gna_compound_bias_t *>(component[i].op.conv1D.ptr_biases);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_filters; row++) {
for (uint32_t col = 0; col < num_filter_coefficients; col++) {
if (logging_precision == kDnnFloat) {
@@ -999,8 +999,8 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
}
#endif
} else if (num_bytes_per_weight == 2) {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
int16_t *ptr_weight = reinterpret_cast<int16_t *>(component[i].op.conv1D.ptr_filters);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_filters; row++) {
for (uint32_t col = 0; col < num_filter_coefficients; col++) {
if (logging_precision == kDnnFloat) {
@@ -1015,8 +1015,8 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
}
#endif
} else if (compute_precision_ == kDnnFloat) {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
float *ptr_weight = reinterpret_cast<float *>(component[i].op.conv1D.ptr_filters);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_filters; row++) {
for (uint32_t col = 0; col < num_filter_coefficients; col++) {
out_wfile << std::setprecision(12)
@@ -1032,9 +1032,9 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
if (compute_precision_ == kDnnInt) {
if (logging_precision == kDnnInt) {
if (num_bytes_per_weight == 1) {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
gna_compound_bias_t
*ptr_biases = reinterpret_cast<gna_compound_bias_t *>(component[i].op.conv1D.ptr_biases);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_filters; row++) {
out_bfile << "0x" << std::setfill('0') << std::setw(8) << std::hex
<< ptr_biases[row].bias << " ";
@@ -1043,26 +1043,26 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
}
#endif
} else {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
int32_t *ptr_biases = reinterpret_cast<int32_t *>(component[i].op.conv1D.ptr_biases);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_filters; row++) {
out_bfile << "0x" << std::setfill('0') << std::setw(8) << std::hex << ptr_biases[row]
<< "\n";
}
#endif
}
} else {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
int32_t *ptr_biases = reinterpret_cast<int32_t *>(component[i].op.conv1D.ptr_biases);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_filters; row++) {
out_bfile << std::setprecision(12)
<< ptr_biases[row] / output_scale_factor << "\n";
}
#endif
}
} else {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
float *ptr_biases = reinterpret_cast<float *>(component[i].op.conv1D.ptr_biases);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_filters; row++) {
out_bfile << std::setprecision(12) << ptr_biases[row] << "\n";
}
@@ -1104,8 +1104,10 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
uint32_t num_vector_delay = component[i].op.recurrent.num_vector_delay;
uint32_t num_bytes_per_weight = component[i].op.recurrent.num_bytes_per_weight;
uint32_t num_bytes_per_bias = component[i].op.recurrent.num_bytes_per_bias;
#ifdef DUMP_WB
uint32_t num_weight_rows = num_columns_out;
uint32_t num_weight_columns = num_columns_in + num_columns_out;
#endif
out_file << "<num_vector_delay> " << std::dec << num_vector_delay << "\n";
if ((compute_precision_ == kDnnInt) && (logging_precision == kDnnFloat)) {
out_file << "<num_bytes_per_weight> " << std::dec << 4 << "\n";
@@ -1130,10 +1132,10 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
out_file << "<feedback_address> " << "0x" << std::setfill('0') << std::setw(8) << std::hex
<< GNAPluginNS::memory::MemoryOffset(component[i].op.recurrent.ptr_feedbacks, ptr_dnn_memory_) << "\n";
if (num_bytes_per_weight == 1) {
#ifdef DUMP_WB
int8_t *ptr_weight = reinterpret_cast<int8_t *>(component[i].op.recurrent.ptr_weights);
gna_compound_bias_t
*ptr_bias = reinterpret_cast<gna_compound_bias_t *>(component[i].op.recurrent.ptr_biases);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_weight_rows; row++) {
out_file << "<weight_row> ";
for (uint32_t col = 0; col < num_weight_columns; col++) {
@@ -1151,8 +1153,8 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
}
#endif
} else if (num_bytes_per_weight == 2) {
int16_t *ptr_weight = reinterpret_cast<int16_t *>(component[i].op.recurrent.ptr_weights);
#ifdef DUMP_WB
int16_t *ptr_weight = reinterpret_cast<int16_t *>(component[i].op.recurrent.ptr_weights);
for (uint32_t row = 0; row < num_weight_rows; row++) {
out_file << "<weight_row> ";
for (uint32_t col = 0; col < num_weight_columns; col++) {
@@ -1168,8 +1170,8 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
}
#endif
} else if (compute_precision_ == kDnnFloat) {
float *ptr_weight = reinterpret_cast<float *>(component[i].op.recurrent.ptr_weights);
#ifdef DUMP_WB
float *ptr_weight = reinterpret_cast<float *>(component[i].op.recurrent.ptr_weights);
for (uint32_t row = 0; row < num_weight_rows; row++) {
out_file << "<weight_row> ";
for (uint32_t col = 0; col < num_weight_columns; col++) {
@@ -1186,10 +1188,10 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
if (compute_precision_ == kDnnInt) {
if (logging_precision == kDnnInt) {
if (num_bytes_per_weight == 1) {
gna_compound_bias_t
*ptr_biases = reinterpret_cast<gna_compound_bias_t *>(component[i].op.recurrent.ptr_biases);
out_file << "<compound_bias>" << " ";
#ifdef DUMP_WB
gna_compound_bias_t
*ptr_biases = reinterpret_cast<gna_compound_bias_t *>(component[i].op.recurrent.ptr_biases);
for (uint32_t col = 0; col < num_columns_out; col++) {
out_file << "0x" << std::setfill('0') << std::setw(8) << std::hex
<< ptr_biases[col].bias << " ";
@@ -1198,29 +1200,29 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
}
#endif
} else {
int32_t *ptr_biases = reinterpret_cast<int32_t *>(component[i].op.recurrent.ptr_biases);
out_file << "<bias>" << " ";
#ifdef DUMP_WB
int32_t *ptr_biases = reinterpret_cast<int32_t *>(component[i].op.recurrent.ptr_biases);
for (uint32_t col = 0; col < num_columns_out; col++) {
out_file << "0x" << std::setfill('0') << std::setw(8) << std::hex << ptr_biases[col]
<< " ";
}
#endif
}
} else {
int32_t *ptr_biases = reinterpret_cast<int32_t *>(component[i].op.recurrent.ptr_biases);
out_file << "<bias>" << " ";
#ifdef DUMP_WB
int32_t *ptr_biases = reinterpret_cast<int32_t *>(component[i].op.recurrent.ptr_biases);
for (uint32_t col = 0; col < num_columns_out; col++) {
out_file << std::setprecision(12) << std::scientific
<< ptr_biases[col] / output_scale_factor << " ";
}
#endif
}
} else {
float *ptr_biases = reinterpret_cast<float *>(component[i].op.recurrent.ptr_biases);
out_file << "<bias>" << " ";
#ifdef DUMP_WB
float *ptr_biases = reinterpret_cast<float *>(component[i].op.recurrent.ptr_biases);
for (uint32_t col = 0; col < num_columns_out; col++) {
out_file << std::setprecision(12) << std::scientific << ptr_biases[col] << " ";
}
@@ -2168,8 +2170,8 @@ void GNAPluginNS::backend::AMIntelDNN::WriteInputAndOutputText() {
in_file << std::setw(8) << floatValue / input_scale_factor << "\n";
}
}
#endif
}
#endif
}

uint32_t GNAPluginNS::backend::AMIntelDNN::num_components() {
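
A note on the "unclosed scopes" part of the change, visible in the last hunk above: in the old code one closing brace sat between the conditional's opening directive and its #endif, so compiling without the macro removed the brace and left the function unbalanced; the commit moves the #endif so every unconditionally opened scope is closed unconditionally. A minimal sketch of the hazard, with illustrative names and a LIGHT_DUMP-style guard assumed:

    #include <cstdint>
    #include <iostream>

    void write_scaled(const float* data, uint32_t n, float scale) {
        if (scale != 0.0f) {
    #ifdef LIGHT_DUMP
            for (uint32_t i = 0; i < n; i++) {
                std::cout << data[i] / scale << "\n";
            }
    #endif
        }  // this brace closes the unconditional 'if'; had the #endif been
           // placed after it, builds without LIGHT_DUMP would lose the brace
           // and fail to compile with an unclosed scope.
    }
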