diff --git a/src/main/java/com/google/cloud/discotoproto3converter/proto3/ProtoMerger.java b/src/main/java/com/google/cloud/discotoproto3converter/proto3/ProtoMerger.java
index f9add10..994b721 100644
--- a/src/main/java/com/google/cloud/discotoproto3converter/proto3/ProtoMerger.java
+++ b/src/main/java/com/google/cloud/discotoproto3converter/proto3/ProtoMerger.java
@@ -19,12 +19,14 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.stream.Collectors;
 
 // This class does not intend to do a fully-functional merge of two proto models, instead it focuses
 // on merging currently known potential discrepancies between old and new proto files:
-// - Missing field in a new proto file
-// - Missing values in enums
-// - Mismatching service method google.api.method_signature option values
+// - missing field in a new proto file;
+// - missing values in enums;
+// - mismatching service method google.api.method_signature option values (old overwrite new);
+// - mismatching field annotations (old overwrite new).
 //
 // Additional merging functionality is expected to be added to this class as needed.
 public class ProtoMerger {
@@ -79,24 +81,42 @@ private void mergeMessages(Map<String, Message> newMessages, Map<String, Message> oldMessages) {
-      Map<String, Message> newEnumsMap = new HashMap<>();
-      for (Message nestedEnum : newMessage.getEnums()) {
-        newEnumsMap.put(nestedEnum.getName(), nestedEnum);
-      }
-      for (Message oldEnum : oldMessage.getEnums()) {
-        Message newEnum = newEnumsMap.get(oldEnum.getName());
-        mergeFields(newEnum, oldEnum, newMessages);
-      }
+      mergeEnums(newMessages, oldMessage, newMessage);
+    }
+  }
+
+  private void mergeEnums(
+      Map<String, Message> newMessages, Message oldMessage, Message newMessage) {
+    Map<String, Message> newEnumsMap = new HashMap<>();
+    for (Message nestedEnum : newMessage.getEnums()) {
+      newEnumsMap.put(nestedEnum.getName(), nestedEnum);
+    }
+    for (Message oldEnum : oldMessage.getEnums()) {
+      Message newEnum = newEnumsMap.get(oldEnum.getName());
+      mergeFields(newEnum, oldEnum, newMessages);
     }
   }
 
   private void mergeFields(
       Message newMessage, Message oldMessage, Map<String, Message> newMessages) {
     // Merge fields
+    Map<String, Field> newFieldsMap =
+        newMessage.getFields().stream().collect(Collectors.toMap(ProtoElement::getName, f -> f));
+
     for (Field oldField : oldMessage.getFields()) {
       // compareTo() will be used by contains() to search for the field, not equals().
       if (!newMessage.getFields().contains(oldField)) {
-        newMessage.getFields().add(copyField(oldField, newMessages));
+        // Copy removed field
+        newMessage.getFields().add(copyField(null, oldField, newMessages));
+      } else {
+        Field newField = newFieldsMap.get(oldField.getName());
+        // Copy missing options if a new field has less options than old field.
+        // This is a very primitive merge logic. Add a proper merge logic if ever needed.
+        if (oldField.getOptions().size() > newField.getOptions().size()
+            || oldField.isOptional() != newField.isOptional()) {
+          newMessage.getFields().remove(oldField);
+          newMessage.getFields().add(copyField(newField, oldField, newMessages));
+        }
       }
     }
   }
@@ -104,7 +124,7 @@ private void mergeFields(
   // We need to replace references for message types from oldMessages to newMessages.
   // Despite message types having the same names, they are two independently created
   // sets of objects.
-  private Field copyField(Field oldField, Map<String, Message> newMessages) {
+  private Field copyField(Field newField, Field oldField, Map<String, Message> newMessages) {
     Message valueType = Message.PRIMITIVES.get(oldField.getValueType().getName());
     if (valueType == null) {
       valueType = newMessages.get(oldField.getValueType().getName());
@@ -123,7 +143,7 @@ private Field copyField(Field oldField, Map<String, Message> newMessages) {
             oldField.isRepeated(),
             oldField.isOptional(),
             keyType,
-            oldField.getDescription(),
+            newField != null ? newField.getDescription() : oldField.getDescription(),
             oldField.isFirstInOrder());
 
     // Do not do deep copy for options because they are all generic immutable objects (strings or
diff --git a/src/main/java/com/google/cloud/discotoproto3converter/proto3/ProtoParser.java b/src/main/java/com/google/cloud/discotoproto3converter/proto3/ProtoParser.java
index 7f92b25..d9a3d5a 100644
--- a/src/main/java/com/google/cloud/discotoproto3converter/proto3/ProtoParser.java
+++ b/src/main/java/com/google/cloud/discotoproto3converter/proto3/ProtoParser.java
@@ -204,7 +204,7 @@ private Field parseField(String fieldString, SortedMap<String, Message> nestedEn
     }
     Message keyType = Message.PRIMITIVES.get(m.group("keyType"));
     boolean repeated = m.group("repeated") != null || keyType != null;
-    boolean optional = m.group("optional") != null;
+    boolean optional = repeated || m.group("optional") != null;
 
     Field parsedField =
         new Field(m.group("name"), valueType, repeated, optional, keyType, null, false);
diff --git a/src/test/resources/google/cloud/compute/v1small/compute.merge.proto.baseline b/src/test/resources/google/cloud/compute/v1small/compute.merge.proto.baseline
index 47c5e16..0d5b6c3 100644
--- a/src/test/resources/google/cloud/compute/v1small/compute.merge.proto.baseline
+++ b/src/test/resources/google/cloud/compute/v1small/compute.merge.proto.baseline
@@ -327,7 +327,7 @@ message DeleteAddressRequest {
   // For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.
   //
   // The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
-  optional string request_id = 37109963;
+  string request_id = 37109963 [(google.api.field_behavior) = REQUIRED];
 }
 
 // [Output Only] If errors are generated during processing of the operation, this field will be populated.
diff --git a/src/test/resources/google/cloud/compute/v1small/compute.proto b/src/test/resources/google/cloud/compute/v1small/compute.proto
index 47c5e16..bd94fd1 100644
--- a/src/test/resources/google/cloud/compute/v1small/compute.proto
+++ b/src/test/resources/google/cloud/compute/v1small/compute.proto
@@ -327,8 +327,7 @@ message DeleteAddressRequest {
   // For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.
   //
   // The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
-  optional string request_id = 37109963;
-
+  string request_id = 37109963 [(google.api.field_behavior) = REQUIRED];
 }
 
 // [Output Only] If errors are generated during processing of the operation, this field will be populated.
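Below is a minimal, self-contained sketch (not part of the patch) of the "old overwrites new" rule that the mergeFields() change above applies to field annotations: a field from the regenerated proto is replaced with a copy of the old field (keeping the new field's description, per copyField) when it carries fewer options or a different optional flag. SimpleField and shouldOverwriteWithOld are hypothetical stand-ins for the converter's Field class and the inline check, introduced here only for illustration.

import java.util.List;

// Hypothetical, simplified stand-in for the converter's Field class (illustration only).
public class FieldMergeSketch {

  static final class SimpleField {
    final String name;
    final List<String> options;
    final boolean optional;

    SimpleField(String name, List<String> options, boolean optional) {
      this.name = name;
      this.options = options;
      this.optional = optional;
    }
  }

  // Mirrors the condition added to mergeFields(): keep the old field's annotations when the
  // regenerated (new) field appears to have lost options or changed its optional flag.
  static boolean shouldOverwriteWithOld(SimpleField oldField, SimpleField newField) {
    return oldField.options.size() > newField.options.size()
        || oldField.optional != newField.optional;
  }

  public static void main(String[] args) {
    // Shaped after the request_id change in the test resources above: the old proto carries a
    // field_behavior annotation and is not marked optional, while the regenerated proto has the
    // field as plain optional with no options.
    SimpleField oldField =
        new SimpleField("request_id", List.of("(google.api.field_behavior) = REQUIRED"), false);
    SimpleField newField = new SimpleField("request_id", List.of(), true);

    // Prints "true": the merged proto keeps the old field's options and optionality.
    System.out.println(shouldOverwriteWithOld(oldField, newField));
  }
}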