Skip to content

Commit

Permalink
Merge pull request google#418 from FreddyFunk/master
Browse files Browse the repository at this point in the history
Fixed hundreds of compiler warnings google#411
  • Loading branch information
tomfinegan authored Jul 17, 2018
2 parents 1695423 + c0c6ee1 commit fc44099
Show file tree
Hide file tree
Showing 76 changed files with 172 additions and 172 deletions.
2 changes: 1 addition & 1 deletion src/draco/attributes/attribute_octahedron_transform.cc
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ AttributeOctahedronTransform::GeneratePortableAttribute(
DRACO_DCHECK(is_initialized());

// Allocate portable attribute.
const int num_entries = point_ids.size();
const int num_entries = static_cast<int>(point_ids.size());
std::unique_ptr<PointAttribute> portable_attribute =
InitPortableAttribute(num_entries, 2, num_points, attribute, true);

Expand Down
4 changes: 2 additions & 2 deletions src/draco/attributes/attribute_quantization_transform.cc
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ bool AttributeQuantizationTransform::ComputeParameters(
attribute.GetValue(AttributeValueIndex(0), min_values_.data());
attribute.GetValue(AttributeValueIndex(0), max_values.get());

for (AttributeValueIndex i(1); i < attribute.size(); ++i) {
for (AttributeValueIndex i(1); i < static_cast<uint32_t>(attribute.size()); ++i) {
attribute.GetValue(i, att_val.get());
for (int c = 0; c < num_components; ++c) {
if (min_values_[c] > att_val[c])
Expand Down Expand Up @@ -144,7 +144,7 @@ AttributeQuantizationTransform::GeneratePortableAttribute(
DRACO_DCHECK(is_initialized());

// Allocate portable attribute.
const int num_entries = point_ids.size();
const int num_entries = static_cast<int>(point_ids.size());
const int num_components = attribute.num_components();
std::unique_ptr<PointAttribute> portable_attribute = InitPortableAttribute(
num_entries, num_components, num_points, attribute, true);
Expand Down
2 changes: 1 addition & 1 deletion src/draco/attributes/attribute_transform_data.h
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ class AttributeTransformData {
// Sets a parameter value at the end of the |buffer_|.
template <typename DataTypeT>
void AppendParameterValue(const DataTypeT &in_data) {
SetParameterValue(buffer_.data_size(), in_data);
SetParameterValue(static_cast<int>(buffer_.data_size()), in_data);
}

private:
Expand Down
6 changes: 3 additions & 3 deletions src/draco/attributes/point_attribute.cc
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ bool PointAttribute::Reset(size_t num_attribute_values) {
return false;
// Assign the new buffer to the parent attribute.
ResetBuffer(attribute_buffer_.get(), entry_size, 0);
num_unique_entries_ = num_attribute_values;
num_unique_entries_ = static_cast<uint32_t>(num_attribute_values);
return true;
}

Expand Down Expand Up @@ -188,12 +188,12 @@ AttributeValueIndex::ValueType PointAttribute::DeduplicateFormattedValues(
// The number of points is equal to the number of old unique values.
SetExplicitMapping(num_unique_entries_);
// Update the explicit map.
for (int i = 0; i < num_unique_entries_; ++i) {
for (uint32_t i = 0; i < num_unique_entries_; ++i) {
SetPointMapEntry(PointIndex(i), value_map[AttributeValueIndex(i)]);
}
} else {
// Update point to value map using the mapping between old and new values.
for (PointIndex i(0); i < indices_map_.size(); ++i) {
for (PointIndex i(0); i < static_cast<uint32_t>(indices_map_.size()); ++i) {
SetPointMapEntry(i, value_map[indices_map_[i]]);
}
}
Expand Down
2 changes: 1 addition & 1 deletion src/draco/attributes/point_attribute.h
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ class PointAttribute : public GeometryAttribute {

// Sets the new number of unique attribute entries for the attribute.
void Resize(size_t new_num_unique_entries) {
num_unique_entries_ = new_num_unique_entries;
num_unique_entries_ = static_cast<uint32_t>(new_num_unique_entries);
}

// Functions for setting the type of mapping between point indices and
Expand Down
2 changes: 1 addition & 1 deletion src/draco/compression/attributes/attributes_decoder.cc
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ bool AttributesDecoder::DecodeAttributesDecoderData(DecoderBuffer *in_buffer) {
return false;
point_attribute_ids_.resize(num_attributes);
PointCloud *pc = point_cloud_;
for (int i = 0; i < num_attributes; ++i) {
for (uint32_t i = 0; i < num_attributes; ++i) {
// Decode attribute descriptor data.
uint8_t att_type, data_type, num_components, normalized;
if (!in_buffer->Decode(&att_type))
Expand Down
4 changes: 2 additions & 2 deletions src/draco/compression/attributes/attributes_decoder.h
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ class AttributesDecoder : public AttributesDecoderInterface {
return point_attribute_ids_[i];
}
int32_t GetNumAttributes() const override {
return point_attribute_ids_.size();
return static_cast<int32_t>(point_attribute_ids_.size());
}
PointCloudDecoder *GetDecoder() const override {
return point_cloud_decoder_;
Expand All @@ -65,7 +65,7 @@ class AttributesDecoder : public AttributesDecoderInterface {

protected:
int32_t GetLocalIdForPointAttribute(int32_t point_attribute_id) const {
const int id_map_size = point_attribute_to_local_id_map_.size();
const int id_map_size = static_cast<int>(point_attribute_to_local_id_map_.size());
if (point_attribute_id >= id_map_size)
return -1;
return point_attribute_to_local_id_map_[point_attribute_id];
Expand Down
2 changes: 1 addition & 1 deletion src/draco/compression/attributes/attributes_encoder.cc
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ bool AttributesEncoder::Initialize(PointCloudEncoder *encoder,
bool AttributesEncoder::EncodeAttributesEncoderData(EncoderBuffer *out_buffer) {
// Encode data about all attributes.
EncodeVarint(num_attributes(), out_buffer);
for (int i = 0; i < num_attributes(); ++i) {
for (uint32_t i = 0; i < num_attributes(); ++i) {
const int32_t att_id = point_attribute_ids_[i];
const PointAttribute *const pa = point_cloud_->attribute(att_id);
out_buffer->Encode(static_cast<uint8_t>(pa->attribute_type()));
Expand Down
6 changes: 3 additions & 3 deletions src/draco/compression/attributes/attributes_encoder.h
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ class AttributesEncoder {
point_attribute_ids_.push_back(id);
if (id >= static_cast<int32_t>(point_attribute_to_local_id_map_.size()))
point_attribute_to_local_id_map_.resize(id + 1, -1);
point_attribute_to_local_id_map_[id] = point_attribute_ids_.size() - 1;
point_attribute_to_local_id_map_[id] = static_cast<int32_t>(point_attribute_ids_.size()) - 1;
}

// Sets new attribute point ids (replacing the existing ones).
Expand All @@ -102,7 +102,7 @@ class AttributesEncoder {
}

int32_t GetAttributeId(int i) const { return point_attribute_ids_[i]; }
uint32_t num_attributes() const { return point_attribute_ids_.size(); }
uint32_t num_attributes() const { return static_cast<uint32_t>(point_attribute_ids_.size()); }
PointCloudEncoder *encoder() const { return point_cloud_encoder_; }

protected:
Expand All @@ -122,7 +122,7 @@ class AttributesEncoder {
}

int32_t GetLocalIdForPointAttribute(int32_t point_attribute_id) const {
const int id_map_size = point_attribute_to_local_id_map_.size();
const int id_map_size = static_cast<int>(point_attribute_to_local_id_map_.size());
if (point_attribute_id >= id_map_size)
return -1;
return point_attribute_to_local_id_map_[point_attribute_id];
Expand Down
12 changes: 6 additions & 6 deletions src/draco/compression/attributes/kd_tree_attributes_decoder.cc
Original file line number Diff line number Diff line change
Expand Up @@ -95,15 +95,15 @@ class PointAttributeVectorOutputIterator {
if (data_size != 4) { // handle uint16_t, uint8_t
// selectively copy data bytes
uint8_t *data_counter = data_;
for (auto index = 0; index < num_components;
for (uint32_t index = 0; index < num_components;
index += 1, data_counter += data_size) {
std::memcpy(data_counter, data_source + index, data_size);
}
// redirect to copied data
data_source = reinterpret_cast<uint32_t *>(data_);
}
const AttributeValueIndex avi = attribute->mapped_index(point_id_);
if (avi >= attribute->size())
if (avi >= static_cast<uint32_t>(attribute->size()))
return *this;
attribute->SetAttributeValue(avi, data_source);
}
Expand Down Expand Up @@ -267,7 +267,7 @@ bool KdTreeAttributesDecoder::DecodeDataNeededByPortableTransforms(
AttributeQuantizationTransform transform;
transform.SetParameters(quantization_bits, min_value.data(),
num_components, max_value_dif);
const int num_transforms = attribute_quantization_transforms_.size();
const int num_transforms = static_cast<int>(attribute_quantization_transforms_.size());
if (!transform.TransferToAttribute(
quantized_portable_attributes_[num_transforms].get()))
return false;
Expand All @@ -289,7 +289,7 @@ bool KdTreeAttributesDecoder::DecodeDataNeededByPortableTransforms(
const uint32_t attribute_count = GetNumAttributes();
uint32_t total_dimensionality = 0; // position is a required dimension
std::vector<AttributeTuple> atts(attribute_count);
for (auto attribute_index = 0; attribute_index < attribute_count;
for (auto attribute_index = 0; static_cast<uint32_t>(attribute_index) < attribute_count;
attribute_index += 1) // increment the dimensionality as needed...
{
const int att_id = GetAttributeId(attribute_index);
Expand Down Expand Up @@ -336,7 +336,7 @@ bool KdTreeAttributesDecoder::DecodeDataNeededByPortableTransforms(
if (!in_buffer->Decode(&num_points))
return false;

for (auto attribute_index = 0; attribute_index < attribute_count;
for (auto attribute_index = 0; static_cast<uint32_t>(attribute_index) < attribute_count;
attribute_index += 1) {
const int att_id = GetAttributeId(attribute_index);
PointAttribute *const attr =
Expand Down Expand Up @@ -410,7 +410,7 @@ bool KdTreeAttributesDecoder::TransformAttributeBackToSignedType(
std::vector<UnsignedType> unsigned_val(att->num_components());
std::vector<SignedDataTypeT> signed_val(att->num_components());

for (AttributeValueIndex avi(0); avi < att->size(); ++avi) {
for (AttributeValueIndex avi(0); avi < static_cast<uint32_t>(att->size()); ++avi) {
att->GetValue(avi, &unsigned_val[0]);
for (int c = 0; c < att->num_components(); ++c) {
// Up-cast |unsigned_val| to int32_t to ensure we don't overflow it for
Expand Down
10 changes: 5 additions & 5 deletions src/draco/compression/attributes/kd_tree_attributes_encoder.cc
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ bool KdTreeAttributesEncoder::TransformAttributesToPortableFormat() {
// the kd tree encoder (quantization of floating attributes for now).
const size_t num_points = encoder()->point_cloud()->num_points();
int num_components = 0;
for (int i = 0; i < num_attributes(); ++i) {
for (uint32_t i = 0; i < num_attributes(); ++i) {
const int att_id = GetAttributeId(i);
const PointAttribute *const att =
encoder()->point_cloud()->attribute(att_id);
Expand All @@ -41,7 +41,7 @@ bool KdTreeAttributesEncoder::TransformAttributesToPortableFormat() {
num_components_ = num_components;

// Go over all attributes and quantize them if needed.
for (int i = 0; i < num_attributes(); ++i) {
for (uint32_t i = 0; i < num_attributes(); ++i) {
const int att_id = GetAttributeId(i);
const PointAttribute *const att =
encoder()->point_cloud()->attribute(att_id);
Expand Down Expand Up @@ -78,7 +78,7 @@ bool KdTreeAttributesEncoder::TransformAttributesToPortableFormat() {
// the actual encoding of the data.
quantized_portable_attributes_.push_back(
attribute_quantization_transform.GeneratePortableAttribute(
*att, num_points));
*att, static_cast<int>(num_points)));
} else if (att->data_type() == DT_INT32 || att->data_type() == DT_INT16 ||
att->data_type() == DT_INT8) {
// For signed types, find the minimum value for each component. These
Expand All @@ -87,7 +87,7 @@ bool KdTreeAttributesEncoder::TransformAttributesToPortableFormat() {
std::vector<int32_t> min_value(att->num_components(),
std::numeric_limits<int32_t>::max());
std::vector<int32_t> act_value(att->num_components());
for (AttributeValueIndex avi(0); avi < att->size(); ++avi) {
for (AttributeValueIndex avi(0); avi < static_cast<uint32_t>(att->size()); ++avi) {
att->ConvertValue<int32_t>(avi, &act_value[0]);
for (int c = 0; c < att->num_components(); ++c) {
if (min_value[c] > act_value[c])
Expand Down Expand Up @@ -145,7 +145,7 @@ bool KdTreeAttributesEncoder::EncodePortableAttributes(
int num_processed_quantized_attributes = 0;
int num_processed_signed_components = 0;
// Copy data to the point vector.
for (int i = 0; i < num_attributes(); ++i) {
for (uint32_t i = 0; i < num_attributes(); ++i) {
const int att_id = GetAttributeId(i);
const PointAttribute *const att =
encoder()->point_cloud()->attribute(att_id);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ class MeshTraversalSequencer : public PointsSequencer {
attribute->SetExplicitMapping(mesh_->num_points());
const size_t num_faces = mesh_->num_faces();
const size_t num_points = mesh_->num_points();
for (FaceIndex f(0); f < num_faces; ++f) {
for (FaceIndex f(0); f < static_cast<uint32_t>(num_faces); ++f) {
const auto &face = mesh_->face(f);
for (int p = 0; p < 3; ++p) {
const PointIndex point_id = face[p];
Expand Down
4 changes: 2 additions & 2 deletions src/draco/compression/attributes/normal_compression_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -175,8 +175,8 @@ class OctahedronToolBox {
if (abs_sum == 0) {
vec[0] = center_value_; // vec[1] == v[2] == 0
} else {
vec[0] = (static_cast<int64_t>(vec[0]) * center_value_) / abs_sum;
vec[1] = (static_cast<int64_t>(vec[1]) * center_value_) / abs_sum;
vec[0] = (static_cast<int64_t>(vec[0]) * static_cast<int64_t>(center_value_)) / abs_sum;
vec[1] = (static_cast<int64_t>(vec[1]) * static_cast<int64_t>(center_value_)) / abs_sum;
if (vec[2] >= 0) {
vec[2] = center_value_ - std::abs(vec[0]) - std::abs(vec[1]);
} else {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,7 @@ bool MeshPredictionSchemeConstrainedMultiParallelogramDecoder<
// Used to store predicted value for multi-parallelogram prediction.
std::vector<DataTypeT> multi_pred_vals(num_components);

const int corner_map_size = this->mesh_data().data_to_corner_map()->size();
const int corner_map_size = static_cast<int>(this->mesh_data().data_to_corner_map()->size());
for (int p = 1; p < corner_map_size; ++p) {
const CornerIndex start_corner_id =
this->mesh_data().data_to_corner_map()->at(p);
Expand Down Expand Up @@ -211,7 +211,7 @@ bool MeshPredictionSchemeConstrainedMultiParallelogramDecoder<
RAnsBitDecoder decoder;
if (!decoder.StartDecoding(buffer))
return false;
for (int j = 0; j < num_flags; ++j) {
for (uint32_t j = 0; j < num_flags; ++j) {
is_crease_edge_[i][j] = decoder.DecodeNextBit();
}
decoder.EndDecoding();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -87,10 +87,10 @@ class MeshPredictionSchemeConstrainedMultiParallelogramEncoder
// TODO(ostava): This should be generalized in case we use other binary
// coding scheme.
const double entropy = ComputeBinaryShannonEntropy(
total_parallelogram, total_used_parallelograms);
static_cast<uint32_t>(total_parallelogram), static_cast<uint32_t>(total_used_parallelograms));

// Round up to the nearest full bit.
return ceil((double)total_parallelogram * entropy);
return static_cast<int64_t>(ceil(static_cast<double>(total_parallelogram) * entropy));
}

// Struct that contains data used for measuring the error of each available
Expand Down Expand Up @@ -209,7 +209,7 @@ bool MeshPredictionSchemeConstrainedMultiParallelogramEncoder<
// We start processing the vertices from the end because this prediction uses
// data from previous entries that could be overwritten when an entry is
// processed.
for (int p = this->mesh_data().data_to_corner_map()->size() - 1; p > 0; --p) {
for (int p = static_cast<int>(this->mesh_data().data_to_corner_map()->size()) - 1; p > 0; --p) {
const CornerIndex start_corner_id =
this->mesh_data().data_to_corner_map()->at(p);

Expand Down Expand Up @@ -386,7 +386,7 @@ bool MeshPredictionSchemeConstrainedMultiParallelogramEncoder<
// Encode the crease edge flags in the reverse vertex order that is needed
// be the decoder. Note that for the currently supported mode, each vertex
// has exactly |num_used_parallelograms| edges that need to be encoded.
for (int j = is_crease_edge_[i].size() - num_used_parallelograms; j >= 0;
for (int j = static_cast<int>(is_crease_edge_[i].size()) - num_used_parallelograms; j >= 0;
j -= num_used_parallelograms) {
// Go over all edges of the current vertex.
for (int k = 0; k < num_used_parallelograms; ++k) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ bool MeshPredictionSchemeGeometricNormalDecoder<
// Expecting in_data in octahedral coordinates, i.e., portable attribute.
DRACO_DCHECK_EQ(num_components, 2);

const int corner_map_size = this->mesh_data().data_to_corner_map()->size();
const int corner_map_size = static_cast<int>(this->mesh_data().data_to_corner_map()->size());

VectorD<int32_t, 3> pred_normal_3d;
int32_t pred_normal_oct[2];
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -102,7 +102,7 @@ bool MeshPredictionSchemeGeometricNormalEncoder<DataTypeT, TransformT,

flip_normal_bit_encoder_.StartEncoding();

const int corner_map_size = this->mesh_data().data_to_corner_map()->size();
const int corner_map_size = static_cast<int>(this->mesh_data().data_to_corner_map()->size());

VectorD<int32_t, 3> pred_normal_3d;
VectorD<int32_t, 2> pos_pred_normal_oct;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ class MeshPredictionSchemeGeometricNormalPredictorArea
// Convert to int32_t, make sure entries are not too large.
constexpr int64_t upper_bound = 1 << 29;
if (this->normal_prediction_mode_ == ONE_TRIANGLE) {
const int32_t abs_sum = normal.AbsSum();
const int32_t abs_sum = static_cast<int32_t>(normal.AbsSum());
if (abs_sum > upper_bound) {
const int64_t quotient = abs_sum / upper_bound;
normal = normal / quotient;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ bool MeshPredictionSchemeMultiParallelogramDecoder<DataTypeT, TransformT,
const std::vector<int32_t> *const vertex_to_data_map =
this->mesh_data().vertex_to_data_map();

const int corner_map_size = this->mesh_data().data_to_corner_map()->size();
const int corner_map_size = static_cast<int>(this->mesh_data().data_to_corner_map()->size());
for (int p = 1; p < corner_map_size; ++p) {
const CornerIndex start_corner_id =
this->mesh_data().data_to_corner_map()->at(p);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ bool MeshPredictionSchemeMultiParallelogramEncoder<DataTypeT, TransformT,

// We start processing from the end because this prediction uses data from
// previous entries that could be overwritten when an entry is processed.
for (int p = this->mesh_data().data_to_corner_map()->size() - 1; p > 0; --p) {
for (int p = static_cast<int>(this->mesh_data().data_to_corner_map()->size() - 1); p > 0; --p) {
const CornerIndex start_corner_id =
this->mesh_data().data_to_corner_map()->at(p);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ bool MeshPredictionSchemeParallelogramDecoder<DataTypeT, TransformT,
// Restore the first value.
this->transform().ComputeOriginalValue(pred_vals.get(), in_corr, out_data);

const int corner_map_size = this->mesh_data().data_to_corner_map()->size();
const int corner_map_size = static_cast<int>(this->mesh_data().data_to_corner_map()->size());
for (int p = 1; p < corner_map_size; ++p) {
const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p);
const int dst_offset = p * num_components;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ bool MeshPredictionSchemeParallelogramEncoder<DataTypeT, TransformT,
const CornerTable *const table = this->mesh_data().corner_table();
const std::vector<int32_t> *const vertex_to_data_map =
this->mesh_data().vertex_to_data_map();
for (int p = this->mesh_data().data_to_corner_map()->size() - 1; p > 0; --p) {
for (int p = static_cast<int>(this->mesh_data().data_to_corner_map()->size() - 1); p > 0; --p) {
const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p);
const int dst_offset = p * num_components;
if (!ComputeParallelogramPrediction(p, corner_id, table,
Expand Down
Loading

0 comments on commit fc44099

Please sign in to comment.