Skip to content

Commit

Permalink
fix: memory leak caused by double initialization.
Browse files Browse the repository at this point in the history
When the desc is initialized, memory is allocated. It is then allocated
again when the quantized weights are loaded, so the first allocation is
lost, causing the application to leak memory.
  • Loading branch information
i8run committed Jun 15, 2018
1 parent 31fed91 commit c08d314
Showing 1 changed file with 0 additions and 8 deletions.
8 changes: 0 additions & 8 deletions bigquant/native/c_api.cc
Original file line number Diff line number Diff line change
Expand Up @@ -112,10 +112,6 @@ void InternalQuantizedConvKernelInit(QuantizedTensorDesc *quantized_tensor, floa
void InternalQuantizedConvKernelLoadFromModel(QuantizedTensorDesc *quantized_tensor, int8_t *src, float *min,
float *max, size_t c_out, size_t c_in, size_t kernel_h, size_t kernel_w,
float threshold, LAYOUT layout) {
aligned_malloc(&(quantized_tensor->min), 64, quantized_tensor->workspace_size_per_meta_info);
aligned_malloc(&(quantized_tensor->max), 64, quantized_tensor->workspace_size_per_meta_info);
aligned_malloc(&(quantized_tensor->ratio), 64, quantized_tensor->workspace_size_per_meta_info);
aligned_malloc(&(quantized_tensor->data), 64, quantized_tensor->workspace_size);
std::vector<float> fp_model(c_out * c_in * kernel_h * kernel_w);
DequantizeModel(fp_model.data(), src, min, max, c_out, c_in, kernel_h, kernel_w);
float *tmp;
Expand Down Expand Up @@ -230,10 +226,6 @@ void InternalQuantizedFCKernelInit(QuantizedTensorDesc *quantized_tensor, float
void InternalQuantizedFCKernelLoadFromModel(QuantizedTensorDesc *quantized_tensor, int8_t *src, float *min, float *max,
size_t c_out, size_t c_in, float threshold, LAYOUT layout) {
assert((layout == NCHW) || (layout == NHWC));
aligned_malloc(&(quantized_tensor->min), 64, quantized_tensor->workspace_size_per_meta_info);
aligned_malloc(&(quantized_tensor->max), 64, quantized_tensor->workspace_size_per_meta_info);
aligned_malloc(&(quantized_tensor->ratio), 64, quantized_tensor->workspace_size_per_meta_info);
aligned_malloc(&(quantized_tensor->data), 64, quantized_tensor->workspace_size);
std::vector<float> fp_model(c_out * c_in);
DequantizeModel(fp_model.data(), src, min, max, c_out, c_in, 1, 1);
shuffle::PadQuantizeShuffle2D<float, FC_SHUFFLE_KERNEL_M, FC_SHUFFLE_KERNEL_K>(
Expand Down

0 comments on commit c08d314

Please sign in to comment.