From 6a4444fcf4b04db0ef332d11902741762f0a39b0 Mon Sep 17 00:00:00 2001 From: Tony Tang Date: Thu, 14 Jul 2022 11:22:48 -0400 Subject: [PATCH 01/13] feat: copy preview Change Streams API (#1309) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(deps): update dependency com.google.cloud:libraries-bom to v26 (#1304) * chore(deps): update dependency com.google.cloud:libraries-bom to v26 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot * feat: Copy preview Change Streams API to java client Co-authored-by: WhiteSource Renovate Co-authored-by: Owl Bot --- .../bigtable/data/v2/stub/BigtableStub.java | 16 + .../data/v2/stub/BigtableStubSettings.java | 78 + .../data/v2/stub/GrpcBigtableStub.java | 78 + .../com/google/bigtable/v2/BigtableGrpc.java | 234 + .../com/google/bigtable/v2/BigtableProto.java | 458 +- .../com/google/bigtable/v2/DataProto.java | 54 +- .../v2/ListChangeStreamPartitionsRequest.java | 855 ++ ...hangeStreamPartitionsRequestOrBuilder.java | 89 + .../ListChangeStreamPartitionsResponse.java | 726 ++ ...angeStreamPartitionsResponseOrBuilder.java | 60 + .../bigtable/v2/ReadChangeStreamRequest.java | 2495 +++++ .../v2/ReadChangeStreamRequestOrBuilder.java | 308 + .../bigtable/v2/ReadChangeStreamResponse.java | 8471 +++++++++++++++++ .../v2/ReadChangeStreamResponseOrBuilder.java | 132 + .../bigtable/v2/StreamContinuationToken.java | 884 ++ .../v2/StreamContinuationTokenOrBuilder.java | 85 + .../bigtable/v2/StreamContinuationTokens.java | 929 ++ .../v2/StreamContinuationTokensOrBuilder.java | 76 + .../google/bigtable/v2/StreamPartition.java | 712 ++ .../bigtable/v2/StreamPartitionOrBuilder.java | 63 + .../proto/google/bigtable/v2/bigtable.proto | 246 + .../main/proto/google/bigtable/v2/data.proto | 27 + 22 files changed, 16940 insertions(+), 136 deletions(-) create mode 100644 
proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsRequest.java create mode 100644 proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsRequestOrBuilder.java create mode 100644 proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsResponse.java create mode 100644 proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsResponseOrBuilder.java create mode 100644 proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamRequest.java create mode 100644 proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamRequestOrBuilder.java create mode 100644 proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamResponse.java create mode 100644 proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamResponseOrBuilder.java create mode 100644 proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamContinuationToken.java create mode 100644 proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamContinuationTokenOrBuilder.java create mode 100644 proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamContinuationTokens.java create mode 100644 proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamContinuationTokensOrBuilder.java create mode 100644 proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamPartition.java create mode 100644 proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamPartitionOrBuilder.java diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStub.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStub.java index a27d6a5f07..a3ab3f3951 100644 --- 
a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStub.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStub.java @@ -22,12 +22,16 @@ import com.google.api.gax.rpc.UnaryCallable; import com.google.bigtable.v2.CheckAndMutateRowRequest; import com.google.bigtable.v2.CheckAndMutateRowResponse; +import com.google.bigtable.v2.ListChangeStreamPartitionsRequest; +import com.google.bigtable.v2.ListChangeStreamPartitionsResponse; import com.google.bigtable.v2.MutateRowRequest; import com.google.bigtable.v2.MutateRowResponse; import com.google.bigtable.v2.MutateRowsRequest; import com.google.bigtable.v2.MutateRowsResponse; import com.google.bigtable.v2.PingAndWarmRequest; import com.google.bigtable.v2.PingAndWarmResponse; +import com.google.bigtable.v2.ReadChangeStreamRequest; +import com.google.bigtable.v2.ReadChangeStreamResponse; import com.google.bigtable.v2.ReadModifyWriteRowRequest; import com.google.bigtable.v2.ReadModifyWriteRowResponse; import com.google.bigtable.v2.ReadRowsRequest; @@ -73,6 +77,18 @@ public UnaryCallable pingAndWarmCallabl throw new UnsupportedOperationException("Not implemented: readModifyWriteRowCallable()"); } + public ServerStreamingCallable< + ListChangeStreamPartitionsRequest, ListChangeStreamPartitionsResponse> + listChangeStreamPartitionsCallable() { + throw new UnsupportedOperationException( + "Not implemented: listChangeStreamPartitionsCallable()"); + } + + public ServerStreamingCallable + readChangeStreamCallable() { + throw new UnsupportedOperationException("Not implemented: readChangeStreamCallable()"); + } + @Override public abstract void close(); } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStubSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStubSettings.java index 2c12935b45..5c77a08132 100644 --- 
a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStubSettings.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStubSettings.java @@ -35,12 +35,16 @@ import com.google.api.gax.rpc.UnaryCallSettings; import com.google.bigtable.v2.CheckAndMutateRowRequest; import com.google.bigtable.v2.CheckAndMutateRowResponse; +import com.google.bigtable.v2.ListChangeStreamPartitionsRequest; +import com.google.bigtable.v2.ListChangeStreamPartitionsResponse; import com.google.bigtable.v2.MutateRowRequest; import com.google.bigtable.v2.MutateRowResponse; import com.google.bigtable.v2.MutateRowsRequest; import com.google.bigtable.v2.MutateRowsResponse; import com.google.bigtable.v2.PingAndWarmRequest; import com.google.bigtable.v2.PingAndWarmResponse; +import com.google.bigtable.v2.ReadChangeStreamRequest; +import com.google.bigtable.v2.ReadChangeStreamResponse; import com.google.bigtable.v2.ReadModifyWriteRowRequest; import com.google.bigtable.v2.ReadModifyWriteRowResponse; import com.google.bigtable.v2.ReadRowsRequest; @@ -83,6 +87,11 @@ public class BigtableStubSettings extends StubSettings { private final UnaryCallSettings pingAndWarmSettings; private final UnaryCallSettings readModifyWriteRowSettings; + private final ServerStreamingCallSettings< + ListChangeStreamPartitionsRequest, ListChangeStreamPartitionsResponse> + listChangeStreamPartitionsSettings; + private final ServerStreamingCallSettings + readChangeStreamSettings; /** Returns the object with the settings used for calls to readRows. */ public ServerStreamingCallSettings readRowsSettings() { @@ -122,6 +131,19 @@ public UnaryCallSettings pingAndWarmSet return readModifyWriteRowSettings; } + /** Returns the object with the settings used for calls to listChangeStreamPartitions. 
*/ + public ServerStreamingCallSettings< + ListChangeStreamPartitionsRequest, ListChangeStreamPartitionsResponse> + listChangeStreamPartitionsSettings() { + return listChangeStreamPartitionsSettings; + } + + /** Returns the object with the settings used for calls to readChangeStream. */ + public ServerStreamingCallSettings + readChangeStreamSettings() { + return readChangeStreamSettings; + } + public BigtableStub createStub() throws IOException { if (getTransportChannelProvider() .getTransportName() @@ -203,6 +225,9 @@ protected BigtableStubSettings(Builder settingsBuilder) throws IOException { checkAndMutateRowSettings = settingsBuilder.checkAndMutateRowSettings().build(); pingAndWarmSettings = settingsBuilder.pingAndWarmSettings().build(); readModifyWriteRowSettings = settingsBuilder.readModifyWriteRowSettings().build(); + listChangeStreamPartitionsSettings = + settingsBuilder.listChangeStreamPartitionsSettings().build(); + readChangeStreamSettings = settingsBuilder.readChangeStreamSettings().build(); } /** Builder for BigtableStubSettings. 
*/ @@ -221,6 +246,12 @@ public static class Builder extends StubSettings.Builder readModifyWriteRowSettings; + private final ServerStreamingCallSettings.Builder< + ListChangeStreamPartitionsRequest, ListChangeStreamPartitionsResponse> + listChangeStreamPartitionsSettings; + private final ServerStreamingCallSettings.Builder< + ReadChangeStreamRequest, ReadChangeStreamResponse> + readChangeStreamSettings; private static final ImmutableMap> RETRYABLE_CODE_DEFINITIONS; @@ -241,6 +272,10 @@ public static class Builder extends StubSettings.BuildernewArrayList())); definitions.put("no_retry_codes", ImmutableSet.copyOf(Lists.newArrayList())); + definitions.put( + "no_retry_5_codes", ImmutableSet.copyOf(Lists.newArrayList())); + definitions.put( + "no_retry_6_codes", ImmutableSet.copyOf(Lists.newArrayList())); RETRYABLE_CODE_DEFINITIONS = definitions.build(); } @@ -294,6 +329,22 @@ public static class Builder extends StubSettings.Builder>of( @@ -331,6 +384,8 @@ protected Builder(BigtableStubSettings settings) { checkAndMutateRowSettings = settings.checkAndMutateRowSettings.toBuilder(); pingAndWarmSettings = settings.pingAndWarmSettings.toBuilder(); readModifyWriteRowSettings = settings.readModifyWriteRowSettings.toBuilder(); + listChangeStreamPartitionsSettings = settings.listChangeStreamPartitionsSettings.toBuilder(); + readChangeStreamSettings = settings.readChangeStreamSettings.toBuilder(); unaryMethodSettingsBuilders = ImmutableList.>of( @@ -389,6 +444,16 @@ private static Builder initDefaults(Builder builder) { .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_0_codes")) .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_0_params")); + builder + .listChangeStreamPartitionsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_5_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_5_params")); + + builder + .readChangeStreamSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_6_codes")) + 
.setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_6_params")); + return builder; } @@ -448,6 +513,19 @@ public UnaryCallSettings.Builder mutateRowS return readModifyWriteRowSettings; } + /** Returns the builder for the settings used for calls to listChangeStreamPartitions. */ + public ServerStreamingCallSettings.Builder< + ListChangeStreamPartitionsRequest, ListChangeStreamPartitionsResponse> + listChangeStreamPartitionsSettings() { + return listChangeStreamPartitionsSettings; + } + + /** Returns the builder for the settings used for calls to readChangeStream. */ + public ServerStreamingCallSettings.Builder + readChangeStreamSettings() { + return readChangeStreamSettings; + } + @Override public BigtableStubSettings build() throws IOException { return new BigtableStubSettings(this); diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/GrpcBigtableStub.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/GrpcBigtableStub.java index 3c4c967408..b2c219bb3f 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/GrpcBigtableStub.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/GrpcBigtableStub.java @@ -28,18 +28,23 @@ import com.google.api.pathtemplate.PathTemplate; import com.google.bigtable.v2.CheckAndMutateRowRequest; import com.google.bigtable.v2.CheckAndMutateRowResponse; +import com.google.bigtable.v2.ListChangeStreamPartitionsRequest; +import com.google.bigtable.v2.ListChangeStreamPartitionsResponse; import com.google.bigtable.v2.MutateRowRequest; import com.google.bigtable.v2.MutateRowResponse; import com.google.bigtable.v2.MutateRowsRequest; import com.google.bigtable.v2.MutateRowsResponse; import com.google.bigtable.v2.PingAndWarmRequest; import com.google.bigtable.v2.PingAndWarmResponse; +import com.google.bigtable.v2.ReadChangeStreamRequest; +import com.google.bigtable.v2.ReadChangeStreamResponse; import 
com.google.bigtable.v2.ReadModifyWriteRowRequest; import com.google.bigtable.v2.ReadModifyWriteRowResponse; import com.google.bigtable.v2.ReadRowsRequest; import com.google.bigtable.v2.ReadRowsResponse; import com.google.bigtable.v2.SampleRowKeysRequest; import com.google.bigtable.v2.SampleRowKeysResponse; +import com.google.common.collect.ImmutableMap; import com.google.longrunning.stub.GrpcOperationsStub; import io.grpc.MethodDescriptor; import io.grpc.protobuf.ProtoUtils; @@ -122,6 +127,30 @@ public class GrpcBigtableStub extends BigtableStub { ProtoUtils.marshaller(ReadModifyWriteRowResponse.getDefaultInstance())) .build(); + private static final MethodDescriptor< + ListChangeStreamPartitionsRequest, ListChangeStreamPartitionsResponse> + listChangeStreamPartitionsMethodDescriptor = + MethodDescriptor + .newBuilder() + .setType(MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName("google.bigtable.v2.Bigtable/ListChangeStreamPartitions") + .setRequestMarshaller( + ProtoUtils.marshaller(ListChangeStreamPartitionsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListChangeStreamPartitionsResponse.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + readChangeStreamMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName("google.bigtable.v2.Bigtable/ReadChangeStream") + .setRequestMarshaller( + ProtoUtils.marshaller(ReadChangeStreamRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ReadChangeStreamResponse.getDefaultInstance())) + .build(); + private final ServerStreamingCallable readRowsCallable; private final ServerStreamingCallable sampleRowKeysCallable; @@ -132,6 +161,11 @@ public class GrpcBigtableStub extends BigtableStub { private final UnaryCallable pingAndWarmCallable; private final UnaryCallable readModifyWriteRowCallable; + private final ServerStreamingCallable< + 
ListChangeStreamPartitionsRequest, ListChangeStreamPartitionsResponse> + listChangeStreamPartitionsCallable; + private final ServerStreamingCallable + readChangeStreamCallable; private final BackgroundResource backgroundResources; private final GrpcOperationsStub operationsStub; @@ -298,6 +332,29 @@ protected GrpcBigtableStub( return builder.build(); }) .build(); + GrpcCallSettings + listChangeStreamPartitionsTransportSettings = + GrpcCallSettings + .newBuilder() + .setMethodDescriptor(listChangeStreamPartitionsMethodDescriptor) + .setParamsExtractor( + request -> { + ImmutableMap.Builder params = ImmutableMap.builder(); + params.put("table_name", String.valueOf(request.getTableName())); + return params.build(); + }) + .build(); + GrpcCallSettings + readChangeStreamTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(readChangeStreamMethodDescriptor) + .setParamsExtractor( + request -> { + ImmutableMap.Builder params = ImmutableMap.builder(); + params.put("table_name", String.valueOf(request.getTableName())); + return params.build(); + }) + .build(); this.readRowsCallable = callableFactory.createServerStreamingCallable( @@ -324,6 +381,14 @@ protected GrpcBigtableStub( readModifyWriteRowTransportSettings, settings.readModifyWriteRowSettings(), clientContext); + this.listChangeStreamPartitionsCallable = + callableFactory.createServerStreamingCallable( + listChangeStreamPartitionsTransportSettings, + settings.listChangeStreamPartitionsSettings(), + clientContext); + this.readChangeStreamCallable = + callableFactory.createServerStreamingCallable( + readChangeStreamTransportSettings, settings.readChangeStreamSettings(), clientContext); this.backgroundResources = new BackgroundResourceAggregation(clientContext.getBackgroundResources()); @@ -371,6 +436,19 @@ public UnaryCallable pingAndWarmCallabl return readModifyWriteRowCallable; } + @Override + public ServerStreamingCallable< + ListChangeStreamPartitionsRequest, ListChangeStreamPartitionsResponse> 
+ listChangeStreamPartitionsCallable() { + return listChangeStreamPartitionsCallable; + } + + @Override + public ServerStreamingCallable + readChangeStreamCallable() { + return readChangeStreamCallable; + } + @Override public final void close() { try { diff --git a/grpc-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/BigtableGrpc.java b/grpc-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/BigtableGrpc.java index e6eaad7197..f1cfa5c841 100644 --- a/grpc-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/BigtableGrpc.java +++ b/grpc-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/BigtableGrpc.java @@ -334,6 +334,100 @@ private BigtableGrpc() {} return getReadModifyWriteRowMethod; } + private static volatile io.grpc.MethodDescriptor< + com.google.bigtable.v2.ListChangeStreamPartitionsRequest, + com.google.bigtable.v2.ListChangeStreamPartitionsResponse> + getListChangeStreamPartitionsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListChangeStreamPartitions", + requestType = com.google.bigtable.v2.ListChangeStreamPartitionsRequest.class, + responseType = com.google.bigtable.v2.ListChangeStreamPartitionsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) + public static io.grpc.MethodDescriptor< + com.google.bigtable.v2.ListChangeStreamPartitionsRequest, + com.google.bigtable.v2.ListChangeStreamPartitionsResponse> + getListChangeStreamPartitionsMethod() { + io.grpc.MethodDescriptor< + com.google.bigtable.v2.ListChangeStreamPartitionsRequest, + com.google.bigtable.v2.ListChangeStreamPartitionsResponse> + getListChangeStreamPartitionsMethod; + if ((getListChangeStreamPartitionsMethod = BigtableGrpc.getListChangeStreamPartitionsMethod) + == null) { + synchronized (BigtableGrpc.class) { + if ((getListChangeStreamPartitionsMethod = BigtableGrpc.getListChangeStreamPartitionsMethod) + == null) { + BigtableGrpc.getListChangeStreamPartitionsMethod = + 
getListChangeStreamPartitionsMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "ListChangeStreamPartitions")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.bigtable.v2.ListChangeStreamPartitionsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.bigtable.v2.ListChangeStreamPartitionsResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new BigtableMethodDescriptorSupplier("ListChangeStreamPartitions")) + .build(); + } + } + } + return getListChangeStreamPartitionsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.bigtable.v2.ReadChangeStreamRequest, + com.google.bigtable.v2.ReadChangeStreamResponse> + getReadChangeStreamMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ReadChangeStream", + requestType = com.google.bigtable.v2.ReadChangeStreamRequest.class, + responseType = com.google.bigtable.v2.ReadChangeStreamResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) + public static io.grpc.MethodDescriptor< + com.google.bigtable.v2.ReadChangeStreamRequest, + com.google.bigtable.v2.ReadChangeStreamResponse> + getReadChangeStreamMethod() { + io.grpc.MethodDescriptor< + com.google.bigtable.v2.ReadChangeStreamRequest, + com.google.bigtable.v2.ReadChangeStreamResponse> + getReadChangeStreamMethod; + if ((getReadChangeStreamMethod = BigtableGrpc.getReadChangeStreamMethod) == null) { + synchronized (BigtableGrpc.class) { + if ((getReadChangeStreamMethod = BigtableGrpc.getReadChangeStreamMethod) == null) { + BigtableGrpc.getReadChangeStreamMethod = + getReadChangeStreamMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ReadChangeStream")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.bigtable.v2.ReadChangeStreamRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.bigtable.v2.ReadChangeStreamResponse.getDefaultInstance())) + .setSchemaDescriptor(new BigtableMethodDescriptorSupplier("ReadChangeStream")) + .build(); + } + } + } + return getReadChangeStreamMethod; + } + /** Creates a new async stub that supports all call types for the service */ public static BigtableStub newStub(io.grpc.Channel channel) { io.grpc.stub.AbstractStub.StubFactory factory = @@ -496,6 +590,42 @@ public void readModifyWriteRow( getReadModifyWriteRowMethod(), responseObserver); } + /** + * + * + *
+     * NOTE: This API is not generally available. Users must be allowlisted.
+     * Returns the current list of partitions that make up the table's
+     * change stream. The union of partitions will cover the entire keyspace.
+     * Partitions can be read with `ReadChangeStream`.
+     * 
+ */ + public void listChangeStreamPartitions( + com.google.bigtable.v2.ListChangeStreamPartitionsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListChangeStreamPartitionsMethod(), responseObserver); + } + + /** + * + * + *
+     * NOTE: This API is not generally available. Users must be allowlisted.
+     * Reads changes from a table's change stream. Changes will
+     * reflect both user-initiated mutations and mutations that are caused by
+     * garbage collection.
+     * 
+ */ + public void readChangeStream( + com.google.bigtable.v2.ReadChangeStreamRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getReadChangeStreamMethod(), responseObserver); + } + @java.lang.Override public final io.grpc.ServerServiceDefinition bindService() { return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) @@ -544,6 +674,20 @@ public final io.grpc.ServerServiceDefinition bindService() { com.google.bigtable.v2.ReadModifyWriteRowRequest, com.google.bigtable.v2.ReadModifyWriteRowResponse>( this, METHODID_READ_MODIFY_WRITE_ROW))) + .addMethod( + getListChangeStreamPartitionsMethod(), + io.grpc.stub.ServerCalls.asyncServerStreamingCall( + new MethodHandlers< + com.google.bigtable.v2.ListChangeStreamPartitionsRequest, + com.google.bigtable.v2.ListChangeStreamPartitionsResponse>( + this, METHODID_LIST_CHANGE_STREAM_PARTITIONS))) + .addMethod( + getReadChangeStreamMethod(), + io.grpc.stub.ServerCalls.asyncServerStreamingCall( + new MethodHandlers< + com.google.bigtable.v2.ReadChangeStreamRequest, + com.google.bigtable.v2.ReadChangeStreamResponse>( + this, METHODID_READ_CHANGE_STREAM))) .build(); } } @@ -688,6 +832,46 @@ public void readModifyWriteRow( request, responseObserver); } + + /** + * + * + *
+     * NOTE: This API is not generally available. Users must be allowlisted.
+     * Returns the current list of partitions that make up the table's
+     * change stream. The union of partitions will cover the entire keyspace.
+     * Partitions can be read with `ReadChangeStream`.
+     * 
+ */ + public void listChangeStreamPartitions( + com.google.bigtable.v2.ListChangeStreamPartitionsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncServerStreamingCall( + getChannel().newCall(getListChangeStreamPartitionsMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * NOTE: This API is not generally available. Users must be allowlisted.
+     * Reads changes from a table's change stream. Changes will
+     * reflect both user-initiated mutations and mutations that are caused by
+     * garbage collection.
+     * 
+ */ + public void readChangeStream( + com.google.bigtable.v2.ReadChangeStreamRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncServerStreamingCall( + getChannel().newCall(getReadChangeStreamMethod(), getCallOptions()), + request, + responseObserver); + } } /** @@ -813,6 +997,39 @@ public com.google.bigtable.v2.ReadModifyWriteRowResponse readModifyWriteRow( return io.grpc.stub.ClientCalls.blockingUnaryCall( getChannel(), getReadModifyWriteRowMethod(), getCallOptions(), request); } + + /** + * + * + *
+     * NOTE: This API is not generally available. Users must be allowlisted.
+     * Returns the current list of partitions that make up the table's
+     * change stream. The union of partitions will cover the entire keyspace.
+     * Partitions can be read with `ReadChangeStream`.
+     * 
+ */ + public java.util.Iterator + listChangeStreamPartitions( + com.google.bigtable.v2.ListChangeStreamPartitionsRequest request) { + return io.grpc.stub.ClientCalls.blockingServerStreamingCall( + getChannel(), getListChangeStreamPartitionsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * NOTE: This API is not generally available. Users must be allowlisted.
+     * Reads changes from a table's change stream. Changes will
+     * reflect both user-initiated mutations and mutations that are caused by
+     * garbage collection.
+     * 
+ */ + public java.util.Iterator readChangeStream( + com.google.bigtable.v2.ReadChangeStreamRequest request) { + return io.grpc.stub.ClientCalls.blockingServerStreamingCall( + getChannel(), getReadChangeStreamMethod(), getCallOptions(), request); + } } /** @@ -903,6 +1120,8 @@ protected BigtableFutureStub build(io.grpc.Channel channel, io.grpc.CallOptions private static final int METHODID_CHECK_AND_MUTATE_ROW = 4; private static final int METHODID_PING_AND_WARM = 5; private static final int METHODID_READ_MODIFY_WRITE_ROW = 6; + private static final int METHODID_LIST_CHANGE_STREAM_PARTITIONS = 7; + private static final int METHODID_READ_CHANGE_STREAM = 8; private static final class MethodHandlers implements io.grpc.stub.ServerCalls.UnaryMethod, @@ -963,6 +1182,19 @@ public void invoke(Req request, io.grpc.stub.StreamObserver responseObserv (io.grpc.stub.StreamObserver) responseObserver); break; + case METHODID_LIST_CHANGE_STREAM_PARTITIONS: + serviceImpl.listChangeStreamPartitions( + (com.google.bigtable.v2.ListChangeStreamPartitionsRequest) request, + (io.grpc.stub.StreamObserver< + com.google.bigtable.v2.ListChangeStreamPartitionsResponse>) + responseObserver); + break; + case METHODID_READ_CHANGE_STREAM: + serviceImpl.readChangeStream( + (com.google.bigtable.v2.ReadChangeStreamRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; default: throw new AssertionError(); } @@ -1032,6 +1264,8 @@ public static io.grpc.ServiceDescriptor getServiceDescriptor() { .addMethod(getCheckAndMutateRowMethod()) .addMethod(getPingAndWarmMethod()) .addMethod(getReadModifyWriteRowMethod()) + .addMethod(getListChangeStreamPartitionsMethod()) + .addMethod(getReadChangeStreamMethod()) .build(); } } diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/BigtableProto.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/BigtableProto.java index 2e7276b4de..e37fe2f8bb 100644 --- 
a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/BigtableProto.java +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/BigtableProto.java @@ -95,6 +95,42 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_bigtable_v2_ReadModifyWriteRowResponse_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_bigtable_v2_ReadModifyWriteRowResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_bigtable_v2_ListChangeStreamPartitionsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_bigtable_v2_ListChangeStreamPartitionsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_bigtable_v2_ListChangeStreamPartitionsResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_bigtable_v2_ListChangeStreamPartitionsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_bigtable_v2_ReadChangeStreamRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_bigtable_v2_ReadChangeStreamRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_bigtable_v2_ReadChangeStreamResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_bigtable_v2_ReadChangeStreamResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + 
internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_ChunkInfo_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_ChunkInfo_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_bigtable_v2_ReadChangeStreamResponse_DataChange_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_bigtable_v2_ReadChangeStreamResponse_DataChange_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_bigtable_v2_ReadChangeStreamResponse_Heartbeat_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_bigtable_v2_ReadChangeStreamResponse_Heartbeat_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_bigtable_v2_ReadChangeStreamResponse_CloseStream_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_bigtable_v2_ReadChangeStreamResponse_CloseStream_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; @@ -110,136 +146,197 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "pi/field_behavior.proto\032\031google/api/reso" + "urce.proto\032\030google/api/routing.proto\032\035go" + "ogle/bigtable/v2/data.proto\032\036google/prot" - + "obuf/wrappers.proto\032\027google/rpc/status.p" - + "roto\"\326\001\n\017ReadRowsRequest\022>\n\ntable_name\030\001" - + " \001(\tB*\340A\002\372A$\n\"bigtableadmin.googleapis.c" - + "om/Table\022\026\n\016app_profile_id\030\005 \001(\t\022(\n\004rows" - + "\030\002 
\001(\0132\032.google.bigtable.v2.RowSet\022-\n\006fi" - + "lter\030\003 \001(\0132\035.google.bigtable.v2.RowFilte" - + "r\022\022\n\nrows_limit\030\004 \001(\003\"\370\002\n\020ReadRowsRespon" - + "se\022>\n\006chunks\030\001 \003(\0132..google.bigtable.v2." - + "ReadRowsResponse.CellChunk\022\034\n\024last_scann" - + "ed_row_key\030\002 \001(\014\032\205\002\n\tCellChunk\022\017\n\007row_ke" - + "y\030\001 \001(\014\0221\n\013family_name\030\002 \001(\0132\034.google.pr" - + "otobuf.StringValue\022.\n\tqualifier\030\003 \001(\0132\033." - + "google.protobuf.BytesValue\022\030\n\020timestamp_" - + "micros\030\004 \001(\003\022\016\n\006labels\030\005 \003(\t\022\r\n\005value\030\006 " - + "\001(\014\022\022\n\nvalue_size\030\007 \001(\005\022\023\n\treset_row\030\010 \001" - + "(\010H\000\022\024\n\ncommit_row\030\t \001(\010H\000B\014\n\nrow_status" - + "\"n\n\024SampleRowKeysRequest\022>\n\ntable_name\030\001" - + " \001(\tB*\340A\002\372A$\n\"bigtableadmin.googleapis.c" - + "om/Table\022\026\n\016app_profile_id\030\002 \001(\t\">\n\025Samp" - + "leRowKeysResponse\022\017\n\007row_key\030\001 \001(\014\022\024\n\014of" - + "fset_bytes\030\002 \001(\003\"\266\001\n\020MutateRowRequest\022>\n" - + "\ntable_name\030\001 \001(\tB*\340A\002\372A$\n\"bigtableadmin" - + ".googleapis.com/Table\022\026\n\016app_profile_id\030" - + "\004 \001(\t\022\024\n\007row_key\030\002 \001(\014B\003\340A\002\0224\n\tmutations" - + "\030\003 \003(\0132\034.google.bigtable.v2.MutationB\003\340A" - + "\002\"\023\n\021MutateRowResponse\"\376\001\n\021MutateRowsReq" - + "uest\022>\n\ntable_name\030\001 \001(\tB*\340A\002\372A$\n\"bigtab" - + "leadmin.googleapis.com/Table\022\026\n\016app_prof" - + "ile_id\030\003 \001(\t\022A\n\007entries\030\002 \003(\0132+.google.b" - + "igtable.v2.MutateRowsRequest.EntryB\003\340A\002\032" - + "N\n\005Entry\022\017\n\007row_key\030\001 \001(\014\0224\n\tmutations\030\002" - + " \003(\0132\034.google.bigtable.v2.MutationB\003\340A\002\"" - + 
"\217\001\n\022MutateRowsResponse\022=\n\007entries\030\001 \003(\0132" - + ",.google.bigtable.v2.MutateRowsResponse." - + "Entry\032:\n\005Entry\022\r\n\005index\030\001 \001(\003\022\"\n\006status\030" - + "\002 \001(\0132\022.google.rpc.Status\"\256\002\n\030CheckAndMu" - + "tateRowRequest\022>\n\ntable_name\030\001 \001(\tB*\340A\002\372" - + "A$\n\"bigtableadmin.googleapis.com/Table\022\026" - + "\n\016app_profile_id\030\007 \001(\t\022\024\n\007row_key\030\002 \001(\014B" - + "\003\340A\002\0227\n\020predicate_filter\030\006 \001(\0132\035.google." - + "bigtable.v2.RowFilter\0224\n\016true_mutations\030" - + "\004 \003(\0132\034.google.bigtable.v2.Mutation\0225\n\017f" - + "alse_mutations\030\005 \003(\0132\034.google.bigtable.v" - + "2.Mutation\"6\n\031CheckAndMutateRowResponse\022" - + "\031\n\021predicate_matched\030\001 \001(\010\"i\n\022PingAndWar" - + "mRequest\022;\n\004name\030\001 \001(\tB-\340A\002\372A\'\n%bigtable" - + "admin.googleapis.com/Instance\022\026\n\016app_pro" - + "file_id\030\002 \001(\t\"\025\n\023PingAndWarmResponse\"\306\001\n" - + "\031ReadModifyWriteRowRequest\022>\n\ntable_name" - + "\030\001 \001(\tB*\340A\002\372A$\n\"bigtableadmin.googleapis" - + ".com/Table\022\026\n\016app_profile_id\030\004 \001(\t\022\024\n\007ro" - + "w_key\030\002 \001(\014B\003\340A\002\022;\n\005rules\030\003 \003(\0132\'.google" - + ".bigtable.v2.ReadModifyWriteRuleB\003\340A\002\"B\n" - + "\032ReadModifyWriteRowResponse\022$\n\003row\030\001 \001(\013" - + "2\027.google.bigtable.v2.Row2\260\024\n\010Bigtable\022\233" - + "\002\n\010ReadRows\022#.google.bigtable.v2.ReadRow" - + "sRequest\032$.google.bigtable.v2.ReadRowsRe" - + "sponse\"\301\001\202\323\344\223\002>\"9/v2/{table_name=project" - + "s/*/instances/*/tables/*}:readRows:\001*\212\323\344" - + "\223\002N\022:\n\ntable_name\022,{table_name=projects/" - + "*/instances/*/tables/*}\022\020\n\016app_profile_i" - + "d\332A\ntable_name\332A\031table_name,app_profile_" - + 
"id0\001\022\254\002\n\rSampleRowKeys\022(.google.bigtable" - + ".v2.SampleRowKeysRequest\032).google.bigtab" - + "le.v2.SampleRowKeysResponse\"\303\001\202\323\344\223\002@\022>/v" - + "2/{table_name=projects/*/instances/*/tab" - + "les/*}:sampleRowKeys\212\323\344\223\002N\022:\n\ntable_name" - + "\022,{table_name=projects/*/instances/*/tab" - + "les/*}\022\020\n\016app_profile_id\332A\ntable_name\332A\031" - + "table_name,app_profile_id0\001\022\301\002\n\tMutateRo" - + "w\022$.google.bigtable.v2.MutateRowRequest\032" - + "%.google.bigtable.v2.MutateRowResponse\"\346" - + "\001\202\323\344\223\002?\":/v2/{table_name=projects/*/inst" - + "ances/*/tables/*}:mutateRow:\001*\212\323\344\223\002N\022:\n\n" - + "table_name\022,{table_name=projects/*/insta" - + "nces/*/tables/*}\022\020\n\016app_profile_id\332A\034tab" - + "le_name,row_key,mutations\332A+table_name,r" - + "ow_key,mutations,app_profile_id\022\263\002\n\nMuta" - + "teRows\022%.google.bigtable.v2.MutateRowsRe" - + "quest\032&.google.bigtable.v2.MutateRowsRes" - + "ponse\"\323\001\202\323\344\223\002@\";/v2/{table_name=projects" - + "/*/instances/*/tables/*}:mutateRows:\001*\212\323" - + "\344\223\002N\022:\n\ntable_name\022,{table_name=projects" - + "/*/instances/*/tables/*}\022\020\n\016app_profile_" - + "id\332A\022table_name,entries\332A!table_name,ent" - + "ries,app_profile_id0\001\022\255\003\n\021CheckAndMutate" - + "Row\022,.google.bigtable.v2.CheckAndMutateR" - + "owRequest\032-.google.bigtable.v2.CheckAndM" - + "utateRowResponse\"\272\002\202\323\344\223\002G\"B/v2/{table_na" - + "me=projects/*/instances/*/tables/*}:chec" - + "kAndMutateRow:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{t" - + "able_name=projects/*/instances/*/tables/" - + "*}\022\020\n\016app_profile_id\332ABtable_name,row_ke" - + "y,predicate_filter,true_mutations,false_" - + "mutations\332AQtable_name,row_key,predicate" - + "_filter,true_mutations,false_mutations,a" - + 
"pp_profile_id\022\356\001\n\013PingAndWarm\022&.google.b" - + "igtable.v2.PingAndWarmRequest\032\'.google.b" - + "igtable.v2.PingAndWarmResponse\"\215\001\202\323\344\223\002+\"" - + "&/v2/{name=projects/*/instances/*}:ping:" - + "\001*\212\323\344\223\0029\022%\n\004name\022\035{name=projects/*/insta" - + "nces/*}\022\020\n\016app_profile_id\332A\004name\332A\023name," - + "app_profile_id\022\335\002\n\022ReadModifyWriteRow\022-." - + "google.bigtable.v2.ReadModifyWriteRowReq" - + "uest\032..google.bigtable.v2.ReadModifyWrit" - + "eRowResponse\"\347\001\202\323\344\223\002H\"C/v2/{table_name=p" - + "rojects/*/instances/*/tables/*}:readModi" - + "fyWriteRow:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{tabl" - + "e_name=projects/*/instances/*/tables/*}\022" - + "\020\n\016app_profile_id\332A\030table_name,row_key,r" - + "ules\332A\'table_name,row_key,rules,app_prof" - + "ile_id\032\333\002\312A\027bigtable.googleapis.com\322A\275\002h" - + "ttps://www.googleapis.com/auth/bigtable." 
- + "data,https://www.googleapis.com/auth/big" - + "table.data.readonly,https://www.googleap" - + "is.com/auth/cloud-bigtable.data,https://" - + "www.googleapis.com/auth/cloud-bigtable.d" - + "ata.readonly,https://www.googleapis.com/" - + "auth/cloud-platform,https://www.googleap" - + "is.com/auth/cloud-platform.read-onlyB\353\002\n" - + "\026com.google.bigtable.v2B\rBigtableProtoP\001" - + "Z:google.golang.org/genproto/googleapis/" - + "bigtable/v2;bigtable\252\002\030Google.Cloud.Bigt" - + "able.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Goo" - + "gle::Cloud::Bigtable::V2\352A\\\n\"bigtableadm" - + "in.googleapis.com/Table\0226projects/{proje" - + "ct}/instances/{instance}/tables/{table}\352" - + "AP\n%bigtableadmin.googleapis.com/Instanc" - + "e\022\'projects/{project}/instances/{instanc" - + "e}b\006proto3" + + "obuf/duration.proto\032\037google/protobuf/tim" + + "estamp.proto\032\036google/protobuf/wrappers.p" + + "roto\032\027google/rpc/status.proto\"\326\001\n\017ReadRo" + + "wsRequest\022>\n\ntable_name\030\001 \001(\tB*\340A\002\372A$\n\"b" + + "igtableadmin.googleapis.com/Table\022\026\n\016app" + + "_profile_id\030\005 \001(\t\022(\n\004rows\030\002 \001(\0132\032.google" + + ".bigtable.v2.RowSet\022-\n\006filter\030\003 \001(\0132\035.go" + + "ogle.bigtable.v2.RowFilter\022\022\n\nrows_limit" + + "\030\004 \001(\003\"\370\002\n\020ReadRowsResponse\022>\n\006chunks\030\001 " + + "\003(\0132..google.bigtable.v2.ReadRowsRespons" + + "e.CellChunk\022\034\n\024last_scanned_row_key\030\002 \001(" + + "\014\032\205\002\n\tCellChunk\022\017\n\007row_key\030\001 \001(\014\0221\n\013fami" + + "ly_name\030\002 \001(\0132\034.google.protobuf.StringVa" + + "lue\022.\n\tqualifier\030\003 \001(\0132\033.google.protobuf" + + ".BytesValue\022\030\n\020timestamp_micros\030\004 \001(\003\022\016\n" + + "\006labels\030\005 \003(\t\022\r\n\005value\030\006 \001(\014\022\022\n\nvalue_si" + + "ze\030\007 \001(\005\022\023\n\treset_row\030\010 
\001(\010H\000\022\024\n\ncommit_" + + "row\030\t \001(\010H\000B\014\n\nrow_status\"n\n\024SampleRowKe" + + "ysRequest\022>\n\ntable_name\030\001 \001(\tB*\340A\002\372A$\n\"b" + + "igtableadmin.googleapis.com/Table\022\026\n\016app" + + "_profile_id\030\002 \001(\t\">\n\025SampleRowKeysRespon" + + "se\022\017\n\007row_key\030\001 \001(\014\022\024\n\014offset_bytes\030\002 \001(" + + "\003\"\266\001\n\020MutateRowRequest\022>\n\ntable_name\030\001 \001" + + "(\tB*\340A\002\372A$\n\"bigtableadmin.googleapis.com" + + "/Table\022\026\n\016app_profile_id\030\004 \001(\t\022\024\n\007row_ke" + + "y\030\002 \001(\014B\003\340A\002\0224\n\tmutations\030\003 \003(\0132\034.google" + + ".bigtable.v2.MutationB\003\340A\002\"\023\n\021MutateRowR" + + "esponse\"\376\001\n\021MutateRowsRequest\022>\n\ntable_n" + + "ame\030\001 \001(\tB*\340A\002\372A$\n\"bigtableadmin.googlea" + + "pis.com/Table\022\026\n\016app_profile_id\030\003 \001(\t\022A\n" + + "\007entries\030\002 \003(\0132+.google.bigtable.v2.Muta" + + "teRowsRequest.EntryB\003\340A\002\032N\n\005Entry\022\017\n\007row" + + "_key\030\001 \001(\014\0224\n\tmutations\030\002 \003(\0132\034.google.b" + + "igtable.v2.MutationB\003\340A\002\"\217\001\n\022MutateRowsR" + + "esponse\022=\n\007entries\030\001 \003(\0132,.google.bigtab" + + "le.v2.MutateRowsResponse.Entry\032:\n\005Entry\022" + + "\r\n\005index\030\001 \001(\003\022\"\n\006status\030\002 \001(\0132\022.google." + + "rpc.Status\"\256\002\n\030CheckAndMutateRowRequest\022" + + ">\n\ntable_name\030\001 \001(\tB*\340A\002\372A$\n\"bigtableadm" + + "in.googleapis.com/Table\022\026\n\016app_profile_i" + + "d\030\007 \001(\t\022\024\n\007row_key\030\002 \001(\014B\003\340A\002\0227\n\020predica" + + "te_filter\030\006 \001(\0132\035.google.bigtable.v2.Row" + + "Filter\0224\n\016true_mutations\030\004 \003(\0132\034.google." 
+ + "bigtable.v2.Mutation\0225\n\017false_mutations\030" + + "\005 \003(\0132\034.google.bigtable.v2.Mutation\"6\n\031C" + + "heckAndMutateRowResponse\022\031\n\021predicate_ma" + + "tched\030\001 \001(\010\"i\n\022PingAndWarmRequest\022;\n\004nam" + + "e\030\001 \001(\tB-\340A\002\372A\'\n%bigtableadmin.googleapi" + + "s.com/Instance\022\026\n\016app_profile_id\030\002 \001(\t\"\025" + + "\n\023PingAndWarmResponse\"\306\001\n\031ReadModifyWrit" + + "eRowRequest\022>\n\ntable_name\030\001 \001(\tB*\340A\002\372A$\n" + + "\"bigtableadmin.googleapis.com/Table\022\026\n\016a" + + "pp_profile_id\030\004 \001(\t\022\024\n\007row_key\030\002 \001(\014B\003\340A" + + "\002\022;\n\005rules\030\003 \003(\0132\'.google.bigtable.v2.Re" + + "adModifyWriteRuleB\003\340A\002\"B\n\032ReadModifyWrit" + + "eRowResponse\022$\n\003row\030\001 \001(\0132\027.google.bigta" + + "ble.v2.Row\"{\n!ListChangeStreamPartitions" + + "Request\022>\n\ntable_name\030\001 \001(\tB*\340A\002\372A$\n\"big" + + "tableadmin.googleapis.com/Table\022\026\n\016app_p" + + "rofile_id\030\002 \001(\t\"\\\n\"ListChangeStreamParti" + + "tionsResponse\0226\n\tpartition\030\001 \001(\0132#.googl" + + "e.bigtable.v2.StreamPartition\"\233\003\n\027ReadCh" + + "angeStreamRequest\022>\n\ntable_name\030\001 \001(\tB*\340" + + "A\002\372A$\n\"bigtableadmin.googleapis.com/Tabl" + + "e\022\026\n\016app_profile_id\030\002 \001(\t\0226\n\tpartition\030\003" + + " \001(\0132#.google.bigtable.v2.StreamPartitio" + + "n\0220\n\nstart_time\030\004 \001(\0132\032.google.protobuf." 
+ + "TimestampH\000\022K\n\023continuation_tokens\030\006 \001(\013" + + "2,.google.bigtable.v2.StreamContinuation" + + "TokensH\000\022,\n\010end_time\030\005 \001(\0132\032.google.prot" + + "obuf.Timestamp\0225\n\022heartbeat_duration\030\007 \001" + + "(\0132\031.google.protobuf.DurationB\014\n\nstart_f" + + "rom\"\327\t\n\030ReadChangeStreamResponse\022N\n\013data" + + "_change\030\001 \001(\01327.google.bigtable.v2.ReadC" + + "hangeStreamResponse.DataChangeH\000\022K\n\thear" + + "tbeat\030\002 \001(\01326.google.bigtable.v2.ReadCha" + + "ngeStreamResponse.HeartbeatH\000\022P\n\014close_s" + + "tream\030\003 \001(\01328.google.bigtable.v2.ReadCha" + + "ngeStreamResponse.CloseStreamH\000\032\364\001\n\rMuta" + + "tionChunk\022X\n\nchunk_info\030\001 \001(\0132D.google.b" + + "igtable.v2.ReadChangeStreamResponse.Muta" + + "tionChunk.ChunkInfo\022.\n\010mutation\030\002 \001(\0132\034." + + "google.bigtable.v2.Mutation\032Y\n\tChunkInfo" + + "\022\032\n\022chunked_value_size\030\001 \001(\005\022\034\n\024chunked_" + + "value_offset\030\002 \001(\005\022\022\n\nlast_chunk\030\003 \001(\010\032\274" + + "\003\n\nDataChange\022J\n\004type\030\001 \001(\0162<.google.big" + + "table.v2.ReadChangeStreamResponse.DataCh" + + "ange.Type\022\031\n\021source_cluster_id\030\002 \001(\t\022\017\n\007" + + "row_key\030\003 \001(\014\0224\n\020commit_timestamp\030\004 \001(\0132" + + "\032.google.protobuf.Timestamp\022\022\n\ntiebreake" + + "r\030\005 \001(\005\022J\n\006chunks\030\006 \003(\0132:.google.bigtabl" + + "e.v2.ReadChangeStreamResponse.MutationCh" + + "unk\022\014\n\004done\030\010 \001(\010\022\r\n\005token\030\t \001(\t\0221\n\rlow_" + + "watermark\030\n \001(\0132\032.google.protobuf.Timest" + + "amp\"P\n\004Type\022\024\n\020TYPE_UNSPECIFIED\020\000\022\010\n\004USE" + + "R\020\001\022\026\n\022GARBAGE_COLLECTION\020\002\022\020\n\014CONTINUAT" + + "ION\020\003\032\207\001\n\tHeartbeat\022G\n\022continuation_toke" + + "n\030\001 
\001(\0132+.google.bigtable.v2.StreamConti" + + "nuationToken\0221\n\rlow_watermark\030\002 \001(\0132\032.go" + + "ogle.protobuf.Timestamp\032{\n\013CloseStream\022\"" + + "\n\006status\030\001 \001(\0132\022.google.rpc.Status\022H\n\023co" + + "ntinuation_tokens\030\002 \003(\0132+.google.bigtabl" + + "e.v2.StreamContinuationTokenB\017\n\rstream_r" + + "ecord2\252\030\n\010Bigtable\022\233\002\n\010ReadRows\022#.google" + + ".bigtable.v2.ReadRowsRequest\032$.google.bi" + + "gtable.v2.ReadRowsResponse\"\301\001\202\323\344\223\002>\"9/v2" + + "/{table_name=projects/*/instances/*/tabl" + + "es/*}:readRows:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{" + + "table_name=projects/*/instances/*/tables" + + "/*}\022\020\n\016app_profile_id\332A\ntable_name\332A\031tab" + + "le_name,app_profile_id0\001\022\254\002\n\rSampleRowKe" + + "ys\022(.google.bigtable.v2.SampleRowKeysReq" + + "uest\032).google.bigtable.v2.SampleRowKeysR" + + "esponse\"\303\001\202\323\344\223\002@\022>/v2/{table_name=projec" + + "ts/*/instances/*/tables/*}:sampleRowKeys" + + "\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projec" + + "ts/*/instances/*/tables/*}\022\020\n\016app_profil" + + "e_id\332A\ntable_name\332A\031table_name,app_profi" + + "le_id0\001\022\301\002\n\tMutateRow\022$.google.bigtable." 
+ + "v2.MutateRowRequest\032%.google.bigtable.v2" + + ".MutateRowResponse\"\346\001\202\323\344\223\002?\":/v2/{table_" + + "name=projects/*/instances/*/tables/*}:mu" + + "tateRow:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_n" + + "ame=projects/*/instances/*/tables/*}\022\020\n\016" + + "app_profile_id\332A\034table_name,row_key,muta" + + "tions\332A+table_name,row_key,mutations,app" + + "_profile_id\022\263\002\n\nMutateRows\022%.google.bigt" + + "able.v2.MutateRowsRequest\032&.google.bigta" + + "ble.v2.MutateRowsResponse\"\323\001\202\323\344\223\002@\";/v2/" + + "{table_name=projects/*/instances/*/table" + + "s/*}:mutateRows:\001*\212\323\344\223\002N\022:\n\ntable_name\022," + + "{table_name=projects/*/instances/*/table" + + "s/*}\022\020\n\016app_profile_id\332A\022table_name,entr" + + "ies\332A!table_name,entries,app_profile_id0" + + "\001\022\255\003\n\021CheckAndMutateRow\022,.google.bigtabl" + + "e.v2.CheckAndMutateRowRequest\032-.google.b" + + "igtable.v2.CheckAndMutateRowResponse\"\272\002\202" + + "\323\344\223\002G\"B/v2/{table_name=projects/*/instan" + + "ces/*/tables/*}:checkAndMutateRow:\001*\212\323\344\223" + + "\002N\022:\n\ntable_name\022,{table_name=projects/*" + + "/instances/*/tables/*}\022\020\n\016app_profile_id" + + "\332ABtable_name,row_key,predicate_filter,t" + + "rue_mutations,false_mutations\332AQtable_na" + + "me,row_key,predicate_filter,true_mutatio" + + "ns,false_mutations,app_profile_id\022\356\001\n\013Pi" + + "ngAndWarm\022&.google.bigtable.v2.PingAndWa" + + "rmRequest\032\'.google.bigtable.v2.PingAndWa" + + "rmResponse\"\215\001\202\323\344\223\002+\"&/v2/{name=projects/" + + "*/instances/*}:ping:\001*\212\323\344\223\0029\022%\n\004name\022\035{n" + + "ame=projects/*/instances/*}\022\020\n\016app_profi" + + "le_id\332A\004name\332A\023name,app_profile_id\022\335\002\n\022R" + + "eadModifyWriteRow\022-.google.bigtable.v2.R" + + "eadModifyWriteRowRequest\032..google.bigtab" + + 
"le.v2.ReadModifyWriteRowResponse\"\347\001\202\323\344\223\002" + + "H\"C/v2/{table_name=projects/*/instances/" + + "*/tables/*}:readModifyWriteRow:\001*\212\323\344\223\002N\022" + + ":\n\ntable_name\022,{table_name=projects/*/in" + + "stances/*/tables/*}\022\020\n\016app_profile_id\332A\030" + + "table_name,row_key,rules\332A\'table_name,ro" + + "w_key,rules,app_profile_id\022\216\002\n\032ListChang" + + "eStreamPartitions\0225.google.bigtable.v2.L" + + "istChangeStreamPartitionsRequest\0326.googl" + + "e.bigtable.v2.ListChangeStreamPartitions" + + "Response\"\177\202\323\344\223\002P\"K/v2/{table_name=projec" + + "ts/*/instances/*/tables/*}:listChangeStr" + + "eamPartitions:\001*\332A\ntable_name\332A\031table_na" + + "me,app_profile_id0\001\022\346\001\n\020ReadChangeStream" + + "\022+.google.bigtable.v2.ReadChangeStreamRe" + + "quest\032,.google.bigtable.v2.ReadChangeStr" + + "eamResponse\"u\202\323\344\223\002F\"A/v2/{table_name=pro" + + "jects/*/instances/*/tables/*}:readChange" + + "Stream:\001*\332A\ntable_name\332A\031table_name,app_" + + "profile_id0\001\032\333\002\312A\027bigtable.googleapis.co" + + "m\322A\275\002https://www.googleapis.com/auth/big" + + "table.data,https://www.googleapis.com/au" + + "th/bigtable.data.readonly,https://www.go" + + "ogleapis.com/auth/cloud-bigtable.data,ht" + + "tps://www.googleapis.com/auth/cloud-bigt" + + "able.data.readonly,https://www.googleapi" + + "s.com/auth/cloud-platform,https://www.go" + + "ogleapis.com/auth/cloud-platform.read-on" + + "lyB\353\002\n\026com.google.bigtable.v2B\rBigtableP" + + "rotoP\001Z:google.golang.org/genproto/googl" + + "eapis/bigtable/v2;bigtable\252\002\030Google.Clou" + + "d.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2" + + "\352\002\033Google::Cloud::Bigtable::V2\352A\\\n\"bigta" + + "bleadmin.googleapis.com/Table\0226projects/" + + "{project}/instances/{instance}/tables/{t" + + "able}\352AP\n%bigtableadmin.googleapis.com/I" + + "nstance\022\'projects/{project}/instances/{i" + + 
"nstance}b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -251,6 +348,8 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { com.google.api.ResourceProto.getDescriptor(), com.google.api.RoutingProto.getDescriptor(), com.google.bigtable.v2.DataProto.getDescriptor(), + com.google.protobuf.DurationProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), com.google.protobuf.WrappersProto.getDescriptor(), com.google.rpc.StatusProto.getDescriptor(), }); @@ -400,6 +499,103 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new java.lang.String[] { "Row", }); + internal_static_google_bigtable_v2_ListChangeStreamPartitionsRequest_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_google_bigtable_v2_ListChangeStreamPartitionsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_bigtable_v2_ListChangeStreamPartitionsRequest_descriptor, + new java.lang.String[] { + "TableName", "AppProfileId", + }); + internal_static_google_bigtable_v2_ListChangeStreamPartitionsResponse_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_google_bigtable_v2_ListChangeStreamPartitionsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_bigtable_v2_ListChangeStreamPartitionsResponse_descriptor, + new java.lang.String[] { + "Partition", + }); + internal_static_google_bigtable_v2_ReadChangeStreamRequest_descriptor = + getDescriptor().getMessageTypes().get(16); + internal_static_google_bigtable_v2_ReadChangeStreamRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_bigtable_v2_ReadChangeStreamRequest_descriptor, + new java.lang.String[] { + "TableName", + "AppProfileId", + "Partition", + "StartTime", + 
"ContinuationTokens", + "EndTime", + "HeartbeatDuration", + "StartFrom", + }); + internal_static_google_bigtable_v2_ReadChangeStreamResponse_descriptor = + getDescriptor().getMessageTypes().get(17); + internal_static_google_bigtable_v2_ReadChangeStreamResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_bigtable_v2_ReadChangeStreamResponse_descriptor, + new java.lang.String[] { + "DataChange", "Heartbeat", "CloseStream", "StreamRecord", + }); + internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_descriptor = + internal_static_google_bigtable_v2_ReadChangeStreamResponse_descriptor + .getNestedTypes() + .get(0); + internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_descriptor, + new java.lang.String[] { + "ChunkInfo", "Mutation", + }); + internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_ChunkInfo_descriptor = + internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_descriptor + .getNestedTypes() + .get(0); + internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_ChunkInfo_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_ChunkInfo_descriptor, + new java.lang.String[] { + "ChunkedValueSize", "ChunkedValueOffset", "LastChunk", + }); + internal_static_google_bigtable_v2_ReadChangeStreamResponse_DataChange_descriptor = + internal_static_google_bigtable_v2_ReadChangeStreamResponse_descriptor + .getNestedTypes() + .get(1); + internal_static_google_bigtable_v2_ReadChangeStreamResponse_DataChange_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + 
internal_static_google_bigtable_v2_ReadChangeStreamResponse_DataChange_descriptor, + new java.lang.String[] { + "Type", + "SourceClusterId", + "RowKey", + "CommitTimestamp", + "Tiebreaker", + "Chunks", + "Done", + "Token", + "LowWatermark", + }); + internal_static_google_bigtable_v2_ReadChangeStreamResponse_Heartbeat_descriptor = + internal_static_google_bigtable_v2_ReadChangeStreamResponse_descriptor + .getNestedTypes() + .get(2); + internal_static_google_bigtable_v2_ReadChangeStreamResponse_Heartbeat_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_bigtable_v2_ReadChangeStreamResponse_Heartbeat_descriptor, + new java.lang.String[] { + "ContinuationToken", "LowWatermark", + }); + internal_static_google_bigtable_v2_ReadChangeStreamResponse_CloseStream_descriptor = + internal_static_google_bigtable_v2_ReadChangeStreamResponse_descriptor + .getNestedTypes() + .get(3); + internal_static_google_bigtable_v2_ReadChangeStreamResponse_CloseStream_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_bigtable_v2_ReadChangeStreamResponse_CloseStream_descriptor, + new java.lang.String[] { + "Status", "ContinuationTokens", + }); com.google.protobuf.ExtensionRegistry registry = com.google.protobuf.ExtensionRegistry.newInstance(); registry.add(com.google.api.ClientProto.defaultHost); @@ -418,6 +614,8 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { com.google.api.ResourceProto.getDescriptor(); com.google.api.RoutingProto.getDescriptor(); com.google.bigtable.v2.DataProto.getDescriptor(); + com.google.protobuf.DurationProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); com.google.protobuf.WrappersProto.getDescriptor(); com.google.rpc.StatusProto.getDescriptor(); } diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/DataProto.java 
b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/DataProto.java index ba55154653..e236dc1456 100644 --- a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/DataProto.java +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/DataProto.java @@ -103,6 +103,18 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_bigtable_v2_ReadModifyWriteRule_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_bigtable_v2_ReadModifyWriteRule_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_bigtable_v2_StreamPartition_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_bigtable_v2_StreamPartition_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_bigtable_v2_StreamContinuationTokens_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_bigtable_v2_StreamContinuationTokens_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_bigtable_v2_StreamContinuationToken_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_bigtable_v2_StreamContinuationToken_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; @@ -182,12 +194,18 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "tation\"\200\001\n\023ReadModifyWriteRule\022\023\n\013family" + "_name\030\001 \001(\t\022\030\n\020column_qualifier\030\002 \001(\014\022\026\n" + "\014append_value\030\003 \001(\014H\000\022\032\n\020increment_amoun" - + "t\030\004 \001(\003H\000B\006\n\004ruleB\265\001\n\026com.google.bigtabl" - + 
"e.v2B\tDataProtoP\001Z:google.golang.org/gen" - + "proto/googleapis/bigtable/v2;bigtable\252\002\030" - + "Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\" - + "Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V" - + "2b\006proto3" + + "t\030\004 \001(\003H\000B\006\n\004rule\"B\n\017StreamPartition\022/\n\t" + + "row_range\030\001 \001(\0132\034.google.bigtable.v2.Row" + + "Range\"W\n\030StreamContinuationTokens\022;\n\006tok" + + "ens\030\001 \003(\0132+.google.bigtable.v2.StreamCon" + + "tinuationToken\"`\n\027StreamContinuationToke" + + "n\0226\n\tpartition\030\001 \001(\0132#.google.bigtable.v" + + "2.StreamPartition\022\r\n\005token\030\002 \001(\tB\265\001\n\026com" + + ".google.bigtable.v2B\tDataProtoP\001Z:google" + + ".golang.org/genproto/googleapis/bigtable" + + "/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312" + + "\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Clo" + + "ud::Bigtable::V2b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -367,6 +385,30 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new java.lang.String[] { "FamilyName", "ColumnQualifier", "AppendValue", "IncrementAmount", "Rule", }); + internal_static_google_bigtable_v2_StreamPartition_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_google_bigtable_v2_StreamPartition_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_bigtable_v2_StreamPartition_descriptor, + new java.lang.String[] { + "RowRange", + }); + internal_static_google_bigtable_v2_StreamContinuationTokens_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_google_bigtable_v2_StreamContinuationTokens_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_bigtable_v2_StreamContinuationTokens_descriptor, + new java.lang.String[] { + "Tokens", + }); + 
internal_static_google_bigtable_v2_StreamContinuationToken_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_google_bigtable_v2_StreamContinuationToken_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_bigtable_v2_StreamContinuationToken_descriptor, + new java.lang.String[] { + "Partition", "Token", + }); } // @@protoc_insertion_point(outer_class_scope) diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsRequest.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsRequest.java new file mode 100644 index 0000000000..38beeb41dd --- /dev/null +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsRequest.java @@ -0,0 +1,855 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/bigtable/v2/bigtable.proto + +package com.google.bigtable.v2; + +/** + * + * + *
+ * NOTE: This API is not generally available. Users must be allowlisted.
+ * Request message for Bigtable.ListChangeStreamPartitions.
+ * 
+ * + * Protobuf type {@code google.bigtable.v2.ListChangeStreamPartitionsRequest} + */ +public final class ListChangeStreamPartitionsRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.bigtable.v2.ListChangeStreamPartitionsRequest) + ListChangeStreamPartitionsRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use ListChangeStreamPartitionsRequest.newBuilder() to construct. + private ListChangeStreamPartitionsRequest( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ListChangeStreamPartitionsRequest() { + tableName_ = ""; + appProfileId_ = ""; + } + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new ListChangeStreamPartitionsRequest(); + } + + @Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ListChangeStreamPartitionsRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + String s = input.readStringRequireUtf8(); + + tableName_ = s; + break; + } + case 18: + { + String s = input.readStringRequireUtf8(); + + appProfileId_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch 
(com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return BigtableProto + .internal_static_google_bigtable_v2_ListChangeStreamPartitionsRequest_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return BigtableProto + .internal_static_google_bigtable_v2_ListChangeStreamPartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized(ListChangeStreamPartitionsRequest.class, Builder.class); + } + + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private volatile Object tableName_; + /** + * + * + *
+   * Required. The unique name of the table from which to get change stream
+   * partitions. Values are of the form
+   * `projects/<project>/instances/<instance>/tables/<table>`.
+   * Change streaming must be enabled on the table.
+   * 
+ * + * + * string table_name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The tableName. + */ + @Override + public String getTableName() { + Object ref = tableName_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + tableName_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The unique name of the table from which to get change stream
+   * partitions. Values are of the form
+   * `projects/<project>/instances/<instance>/tables/<table>`.
+   * Change streaming must be enabled on the table.
+   * 
+ * + * + * string table_name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for tableName. + */ + @Override + public com.google.protobuf.ByteString getTableNameBytes() { + Object ref = tableName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); + tableName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int APP_PROFILE_ID_FIELD_NUMBER = 2; + private volatile Object appProfileId_; + /** + * + * + *
+   * This value specifies routing for replication. If not specified, the
+   * "default" application profile will be used.
+   * Single cluster routing must be configured on the profile.
+   * 
+ * + * string app_profile_id = 2; + * + * @return The appProfileId. + */ + @Override + public String getAppProfileId() { + Object ref = appProfileId_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + appProfileId_ = s; + return s; + } + } + /** + * + * + *
+   * This value specifies routing for replication. If not specified, the
+   * "default" application profile will be used.
+   * Single cluster routing must be configured on the profile.
+   * 
+ * + * string app_profile_id = 2; + * + * @return The bytes for appProfileId. + */ + @Override + public com.google.protobuf.ByteString getAppProfileIdBytes() { + Object ref = appProfileId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); + appProfileId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(tableName_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, tableName_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(appProfileId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, appProfileId_); + } + unknownFields.writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(tableName_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, tableName_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(appProfileId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, appProfileId_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof ListChangeStreamPartitionsRequest)) { + return super.equals(obj); + } + ListChangeStreamPartitionsRequest other = (ListChangeStreamPartitionsRequest) obj; + + if 
(!getTableName().equals(other.getTableName())) return false; + if (!getAppProfileId().equals(other.getAppProfileId())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + hash = (37 * hash) + APP_PROFILE_ID_FIELD_NUMBER; + hash = (53 * hash) + getAppProfileId().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static ListChangeStreamPartitionsRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static ListChangeStreamPartitionsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static ListChangeStreamPartitionsRequest parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static ListChangeStreamPartitionsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static ListChangeStreamPartitionsRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static ListChangeStreamPartitionsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data, extensionRegistry); + } + + public static ListChangeStreamPartitionsRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static ListChangeStreamPartitionsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static ListChangeStreamPartitionsRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static ListChangeStreamPartitionsRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static ListChangeStreamPartitionsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static ListChangeStreamPartitionsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(ListChangeStreamPartitionsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + 
return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * NOTE: This API is not generally available. Users must be allowlisted.
+   * Request message for Bigtable.ListChangeStreamPartitions.
+   * 
+ * + * Protobuf type {@code google.bigtable.v2.ListChangeStreamPartitionsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.bigtable.v2.ListChangeStreamPartitionsRequest) + ListChangeStreamPartitionsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return BigtableProto + .internal_static_google_bigtable_v2_ListChangeStreamPartitionsRequest_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return BigtableProto + .internal_static_google_bigtable_v2_ListChangeStreamPartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized(ListChangeStreamPartitionsRequest.class, Builder.class); + } + + // Construct using com.google.bigtable.v2.ListChangeStreamPartitionsRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @Override + public Builder clear() { + super.clear(); + tableName_ = ""; + + appProfileId_ = ""; + + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return BigtableProto + .internal_static_google_bigtable_v2_ListChangeStreamPartitionsRequest_descriptor; + } + + @Override + public ListChangeStreamPartitionsRequest getDefaultInstanceForType() { + return ListChangeStreamPartitionsRequest.getDefaultInstance(); + } + + @Override + public ListChangeStreamPartitionsRequest build() { + ListChangeStreamPartitionsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public ListChangeStreamPartitionsRequest buildPartial() 
{ + ListChangeStreamPartitionsRequest result = new ListChangeStreamPartitionsRequest(this); + result.tableName_ = tableName_; + result.appProfileId_ = appProfileId_; + onBuilt(); + return result; + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof ListChangeStreamPartitionsRequest) { + return mergeFrom((ListChangeStreamPartitionsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(ListChangeStreamPartitionsRequest other) { + if (other == ListChangeStreamPartitionsRequest.getDefaultInstance()) return this; + if (!other.getTableName().isEmpty()) { + tableName_ = other.tableName_; + onChanged(); + } + if (!other.getAppProfileId().isEmpty()) { + appProfileId_ = other.appProfileId_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + ListChangeStreamPartitionsRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (ListChangeStreamPartitionsRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private Object tableName_ = ""; + /** + * + * + *
+     * Required. The unique name of the table from which to get change stream
+     * partitions. Values are of the form
+     * `projects/<project>/instances/<instance>/tables/<table>`.
+     * Change streaming must be enabled on the table.
+     * 
+ * + * + * string table_name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The tableName. + */ + public String getTableName() { + Object ref = tableName_; + if (!(ref instanceof String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + tableName_ = s; + return s; + } else { + return (String) ref; + } + } + /** + * + * + *
+     * Required. The unique name of the table from which to get change stream
+     * partitions. Values are of the form
+     * `projects/<project>/instances/<instance>/tables/<table>`.
+     * Change streaming must be enabled on the table.
+     * 
+ * + * + * string table_name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for tableName. + */ + public com.google.protobuf.ByteString getTableNameBytes() { + Object ref = tableName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + tableName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The unique name of the table from which to get change stream
+     * partitions. Values are of the form
+     * `projects/<project>/instances/<instance>/tables/<table>`.
+     * Change streaming must be enabled on the table.
+     * 
+ * + * + * string table_name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The tableName to set. + * @return This builder for chaining. + */ + public Builder setTableName(String value) { + if (value == null) { + throw new NullPointerException(); + } + + tableName_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The unique name of the table from which to get change stream
+     * partitions. Values are of the form
+     * `projects/<project>/instances/<instance>/tables/<table>`.
+     * Change streaming must be enabled on the table.
+     * 
+ * + * + * string table_name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearTableName() { + + tableName_ = getDefaultInstance().getTableName(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The unique name of the table from which to get change stream
+     * partitions. Values are of the form
+     * `projects/<project>/instances/<instance>/tables/<table>`.
+     * Change streaming must be enabled on the table.
+     * 
+ * + * + * string table_name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for tableName to set. + * @return This builder for chaining. + */ + public Builder setTableNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + tableName_ = value; + onChanged(); + return this; + } + + private Object appProfileId_ = ""; + /** + * + * + *
+     * This value specifies routing for replication. If not specified, the
+     * "default" application profile will be used.
+     * Single cluster routing must be configured on the profile.
+     * 
+ * + * string app_profile_id = 2; + * + * @return The appProfileId. + */ + public String getAppProfileId() { + Object ref = appProfileId_; + if (!(ref instanceof String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + appProfileId_ = s; + return s; + } else { + return (String) ref; + } + } + /** + * + * + *
+     * This value specifies routing for replication. If not specified, the
+     * "default" application profile will be used.
+     * Single cluster routing must be configured on the profile.
+     * 
+ * + * string app_profile_id = 2; + * + * @return The bytes for appProfileId. + */ + public com.google.protobuf.ByteString getAppProfileIdBytes() { + Object ref = appProfileId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + appProfileId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * This value specifies routing for replication. If not specified, the
+     * "default" application profile will be used.
+     * Single cluster routing must be configured on the profile.
+     * 
+ * + * string app_profile_id = 2; + * + * @param value The appProfileId to set. + * @return This builder for chaining. + */ + public Builder setAppProfileId(String value) { + if (value == null) { + throw new NullPointerException(); + } + + appProfileId_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * This value specifies routing for replication. If not specified, the
+     * "default" application profile will be used.
+     * Single cluster routing must be configured on the profile.
+     * 
+ * + * string app_profile_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearAppProfileId() { + + appProfileId_ = getDefaultInstance().getAppProfileId(); + onChanged(); + return this; + } + /** + * + * + *
+     * This value specifies routing for replication. If not specified, the
+     * "default" application profile will be used.
+     * Single cluster routing must be configured on the profile.
+     * 
+ * + * string app_profile_id = 2; + * + * @param value The bytes for appProfileId to set. + * @return This builder for chaining. + */ + public Builder setAppProfileIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + appProfileId_ = value; + onChanged(); + return this; + } + + @Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.bigtable.v2.ListChangeStreamPartitionsRequest) + } + + // @@protoc_insertion_point(class_scope:google.bigtable.v2.ListChangeStreamPartitionsRequest) + private static final ListChangeStreamPartitionsRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new ListChangeStreamPartitionsRequest(); + } + + public static ListChangeStreamPartitionsRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public ListChangeStreamPartitionsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListChangeStreamPartitionsRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public ListChangeStreamPartitionsRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git 
a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsRequestOrBuilder.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsRequestOrBuilder.java new file mode 100644 index 0000000000..741730e983 --- /dev/null +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsRequestOrBuilder.java @@ -0,0 +1,89 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/bigtable/v2/bigtable.proto + +package com.google.bigtable.v2; + +public interface ListChangeStreamPartitionsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.bigtable.v2.ListChangeStreamPartitionsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The unique name of the table from which to get change stream
+   * partitions. Values are of the form
+   * `projects/<project>/instances/<instance>/tables/<table>`.
+   * Change streaming must be enabled on the table.
+   * 
+ * + * + * string table_name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The tableName. + */ + String getTableName(); + /** + * + * + *
+   * Required. The unique name of the table from which to get change stream
+   * partitions. Values are of the form
+   * `projects/<project>/instances/<instance>/tables/<table>`.
+   * Change streaming must be enabled on the table.
+   * 
+ * + * + * string table_name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for tableName. + */ + com.google.protobuf.ByteString getTableNameBytes(); + + /** + * + * + *
+   * This value specifies routing for replication. If not specified, the
+   * "default" application profile will be used.
+   * Single cluster routing must be configured on the profile.
+   * 
+ * + * string app_profile_id = 2; + * + * @return The appProfileId. + */ + String getAppProfileId(); + /** + * + * + *
+   * This value specifies routing for replication. If not specified, the
+   * "default" application profile will be used.
+   * Single cluster routing must be configured on the profile.
+   * 
+ * + * string app_profile_id = 2; + * + * @return The bytes for appProfileId. + */ + com.google.protobuf.ByteString getAppProfileIdBytes(); +} diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsResponse.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsResponse.java new file mode 100644 index 0000000000..fd684952e7 --- /dev/null +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsResponse.java @@ -0,0 +1,726 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/bigtable/v2/bigtable.proto + +package com.google.bigtable.v2; + +/** + * + * + *
+ * NOTE: This API is not generally available. Users must be allowlisted.
+ * Response message for Bigtable.ListChangeStreamPartitions.
+ * 
+ * + * Protobuf type {@code google.bigtable.v2.ListChangeStreamPartitionsResponse} + */ +public final class ListChangeStreamPartitionsResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.bigtable.v2.ListChangeStreamPartitionsResponse) + ListChangeStreamPartitionsResponseOrBuilder { + private static final long serialVersionUID = 0L; + // Use ListChangeStreamPartitionsResponse.newBuilder() to construct. + private ListChangeStreamPartitionsResponse( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ListChangeStreamPartitionsResponse() {} + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new ListChangeStreamPartitionsResponse(); + } + + @Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ListChangeStreamPartitionsResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.bigtable.v2.StreamPartition.Builder subBuilder = null; + if (partition_ != null) { + subBuilder = partition_.toBuilder(); + } + partition_ = + input.readMessage( + com.google.bigtable.v2.StreamPartition.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(partition_); + partition_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + 
} + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return BigtableProto + .internal_static_google_bigtable_v2_ListChangeStreamPartitionsResponse_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return BigtableProto + .internal_static_google_bigtable_v2_ListChangeStreamPartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized(ListChangeStreamPartitionsResponse.class, Builder.class); + } + + public static final int PARTITION_FIELD_NUMBER = 1; + private com.google.bigtable.v2.StreamPartition partition_; + /** + * + * + *
+   * A partition of the change stream.
+   * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + * + * @return Whether the partition field is set. + */ + @Override + public boolean hasPartition() { + return partition_ != null; + } + /** + * + * + *
+   * A partition of the change stream.
+   * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + * + * @return The partition. + */ + @Override + public com.google.bigtable.v2.StreamPartition getPartition() { + return partition_ == null + ? com.google.bigtable.v2.StreamPartition.getDefaultInstance() + : partition_; + } + /** + * + * + *
+   * A partition of the change stream.
+   * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + */ + @Override + public com.google.bigtable.v2.StreamPartitionOrBuilder getPartitionOrBuilder() { + return getPartition(); + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (partition_ != null) { + output.writeMessage(1, getPartition()); + } + unknownFields.writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (partition_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getPartition()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof ListChangeStreamPartitionsResponse)) { + return super.equals(obj); + } + ListChangeStreamPartitionsResponse other = (ListChangeStreamPartitionsResponse) obj; + + if (hasPartition() != other.hasPartition()) return false; + if (hasPartition()) { + if (!getPartition().equals(other.getPartition())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasPartition()) { + hash = (37 * hash) + PARTITION_FIELD_NUMBER; + hash = (53 * hash) + getPartition().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static ListChangeStreamPartitionsResponse 
parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static ListChangeStreamPartitionsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static ListChangeStreamPartitionsResponse parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static ListChangeStreamPartitionsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static ListChangeStreamPartitionsResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static ListChangeStreamPartitionsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static ListChangeStreamPartitionsResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static ListChangeStreamPartitionsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static ListChangeStreamPartitionsResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static ListChangeStreamPartitionsResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static ListChangeStreamPartitionsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static ListChangeStreamPartitionsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(ListChangeStreamPartitionsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * NOTE: This API is not generally available. Users must be allowlisted.
+   * Response message for Bigtable.ListChangeStreamPartitions.
+   * 
+ * + * Protobuf type {@code google.bigtable.v2.ListChangeStreamPartitionsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.bigtable.v2.ListChangeStreamPartitionsResponse) + ListChangeStreamPartitionsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return BigtableProto + .internal_static_google_bigtable_v2_ListChangeStreamPartitionsResponse_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return BigtableProto + .internal_static_google_bigtable_v2_ListChangeStreamPartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized(ListChangeStreamPartitionsResponse.class, Builder.class); + } + + // Construct using com.google.bigtable.v2.ListChangeStreamPartitionsResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @Override + public Builder clear() { + super.clear(); + if (partitionBuilder_ == null) { + partition_ = null; + } else { + partition_ = null; + partitionBuilder_ = null; + } + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return BigtableProto + .internal_static_google_bigtable_v2_ListChangeStreamPartitionsResponse_descriptor; + } + + @Override + public ListChangeStreamPartitionsResponse getDefaultInstanceForType() { + return ListChangeStreamPartitionsResponse.getDefaultInstance(); + } + + @Override + public ListChangeStreamPartitionsResponse build() { + ListChangeStreamPartitionsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + 
return result; + } + + @Override + public ListChangeStreamPartitionsResponse buildPartial() { + ListChangeStreamPartitionsResponse result = new ListChangeStreamPartitionsResponse(this); + if (partitionBuilder_ == null) { + result.partition_ = partition_; + } else { + result.partition_ = partitionBuilder_.build(); + } + onBuilt(); + return result; + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof ListChangeStreamPartitionsResponse) { + return mergeFrom((ListChangeStreamPartitionsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(ListChangeStreamPartitionsResponse other) { + if (other == ListChangeStreamPartitionsResponse.getDefaultInstance()) return this; + if (other.hasPartition()) { + mergePartition(other.getPartition()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + ListChangeStreamPartitionsResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (ListChangeStreamPartitionsResponse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private com.google.bigtable.v2.StreamPartition partition_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.bigtable.v2.StreamPartition, + com.google.bigtable.v2.StreamPartition.Builder, + com.google.bigtable.v2.StreamPartitionOrBuilder> + partitionBuilder_; + /** + * + * + *
+     * A partition of the change stream.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + * + * @return Whether the partition field is set. + */ + public boolean hasPartition() { + return partitionBuilder_ != null || partition_ != null; + } + /** + * + * + *
+     * A partition of the change stream.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + * + * @return The partition. + */ + public com.google.bigtable.v2.StreamPartition getPartition() { + if (partitionBuilder_ == null) { + return partition_ == null + ? com.google.bigtable.v2.StreamPartition.getDefaultInstance() + : partition_; + } else { + return partitionBuilder_.getMessage(); + } + } + /** + * + * + *
+     * A partition of the change stream.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + */ + public Builder setPartition(com.google.bigtable.v2.StreamPartition value) { + if (partitionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + partition_ = value; + onChanged(); + } else { + partitionBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * A partition of the change stream.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + */ + public Builder setPartition(com.google.bigtable.v2.StreamPartition.Builder builderForValue) { + if (partitionBuilder_ == null) { + partition_ = builderForValue.build(); + onChanged(); + } else { + partitionBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * A partition of the change stream.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + */ + public Builder mergePartition(com.google.bigtable.v2.StreamPartition value) { + if (partitionBuilder_ == null) { + if (partition_ != null) { + partition_ = + com.google.bigtable.v2.StreamPartition.newBuilder(partition_) + .mergeFrom(value) + .buildPartial(); + } else { + partition_ = value; + } + onChanged(); + } else { + partitionBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * A partition of the change stream.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + */ + public Builder clearPartition() { + if (partitionBuilder_ == null) { + partition_ = null; + onChanged(); + } else { + partition_ = null; + partitionBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * A partition of the change stream.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + */ + public com.google.bigtable.v2.StreamPartition.Builder getPartitionBuilder() { + + onChanged(); + return getPartitionFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * A partition of the change stream.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + */ + public com.google.bigtable.v2.StreamPartitionOrBuilder getPartitionOrBuilder() { + if (partitionBuilder_ != null) { + return partitionBuilder_.getMessageOrBuilder(); + } else { + return partition_ == null + ? com.google.bigtable.v2.StreamPartition.getDefaultInstance() + : partition_; + } + } + /** + * + * + *
+     * A partition of the change stream.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.bigtable.v2.StreamPartition, + com.google.bigtable.v2.StreamPartition.Builder, + com.google.bigtable.v2.StreamPartitionOrBuilder> + getPartitionFieldBuilder() { + if (partitionBuilder_ == null) { + partitionBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.bigtable.v2.StreamPartition, + com.google.bigtable.v2.StreamPartition.Builder, + com.google.bigtable.v2.StreamPartitionOrBuilder>( + getPartition(), getParentForChildren(), isClean()); + partition_ = null; + } + return partitionBuilder_; + } + + @Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.bigtable.v2.ListChangeStreamPartitionsResponse) + } + + // @@protoc_insertion_point(class_scope:google.bigtable.v2.ListChangeStreamPartitionsResponse) + private static final ListChangeStreamPartitionsResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new ListChangeStreamPartitionsResponse(); + } + + public static ListChangeStreamPartitionsResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public ListChangeStreamPartitionsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListChangeStreamPartitionsResponse(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public ListChangeStreamPartitionsResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsResponseOrBuilder.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsResponseOrBuilder.java new file mode 100644 index 0000000000..630816f767 --- /dev/null +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsResponseOrBuilder.java @@ -0,0 +1,60 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/bigtable/v2/bigtable.proto + +package com.google.bigtable.v2; + +public interface ListChangeStreamPartitionsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.bigtable.v2.ListChangeStreamPartitionsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * A partition of the change stream.
+   * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + * + * @return Whether the partition field is set. + */ + boolean hasPartition(); + /** + * + * + *
+   * A partition of the change stream.
+   * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + * + * @return The partition. + */ + com.google.bigtable.v2.StreamPartition getPartition(); + /** + * + * + *
+   * A partition of the change stream.
+   * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + */ + com.google.bigtable.v2.StreamPartitionOrBuilder getPartitionOrBuilder(); +} diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamRequest.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamRequest.java new file mode 100644 index 0000000000..14d0d9024e --- /dev/null +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamRequest.java @@ -0,0 +1,2495 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/bigtable/v2/bigtable.proto + +package com.google.bigtable.v2; + +/** + * + * + *
+ * NOTE: This API is not generally available. Users must be allowlisted.
+ * Request message for Bigtable.ReadChangeStream.
+ * 
+ * + * Protobuf type {@code google.bigtable.v2.ReadChangeStreamRequest} + */ +public final class ReadChangeStreamRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.bigtable.v2.ReadChangeStreamRequest) + ReadChangeStreamRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use ReadChangeStreamRequest.newBuilder() to construct. + private ReadChangeStreamRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReadChangeStreamRequest() { + tableName_ = ""; + appProfileId_ = ""; + } + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new ReadChangeStreamRequest(); + } + + @Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ReadChangeStreamRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + String s = input.readStringRequireUtf8(); + + tableName_ = s; + break; + } + case 18: + { + String s = input.readStringRequireUtf8(); + + appProfileId_ = s; + break; + } + case 26: + { + StreamPartition.Builder subBuilder = null; + if (partition_ != null) { + subBuilder = partition_.toBuilder(); + } + partition_ = input.readMessage(StreamPartition.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(partition_); + partition_ = subBuilder.buildPartial(); + } + + break; + } + case 34: + { + 
com.google.protobuf.Timestamp.Builder subBuilder = null; + if (startFromCase_ == 4) { + subBuilder = ((com.google.protobuf.Timestamp) startFrom_).toBuilder(); + } + startFrom_ = + input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.protobuf.Timestamp) startFrom_); + startFrom_ = subBuilder.buildPartial(); + } + startFromCase_ = 4; + break; + } + case 42: + { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (endTime_ != null) { + subBuilder = endTime_.toBuilder(); + } + endTime_ = + input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(endTime_); + endTime_ = subBuilder.buildPartial(); + } + + break; + } + case 50: + { + StreamContinuationTokens.Builder subBuilder = null; + if (startFromCase_ == 6) { + subBuilder = ((StreamContinuationTokens) startFrom_).toBuilder(); + } + startFrom_ = input.readMessage(StreamContinuationTokens.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((StreamContinuationTokens) startFrom_); + startFrom_ = subBuilder.buildPartial(); + } + startFromCase_ = 6; + break; + } + case 58: + { + com.google.protobuf.Duration.Builder subBuilder = null; + if (heartbeatDuration_ != null) { + subBuilder = heartbeatDuration_.toBuilder(); + } + heartbeatDuration_ = + input.readMessage(com.google.protobuf.Duration.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(heartbeatDuration_); + heartbeatDuration_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); + } 
catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return BigtableProto.internal_static_google_bigtable_v2_ReadChangeStreamRequest_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized(ReadChangeStreamRequest.class, Builder.class); + } + + private int startFromCase_ = 0; + private Object startFrom_; + + public enum StartFromCase implements com.google.protobuf.Internal.EnumLite, InternalOneOfEnum { + START_TIME(4), + CONTINUATION_TOKENS(6), + STARTFROM_NOT_SET(0); + private final int value; + + private StartFromCase(int value) { + this.value = value; + } + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @Deprecated + public static StartFromCase valueOf(int value) { + return forNumber(value); + } + + public static StartFromCase forNumber(int value) { + switch (value) { + case 4: + return START_TIME; + case 6: + return CONTINUATION_TOKENS; + case 0: + return STARTFROM_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public StartFromCase getStartFromCase() { + return StartFromCase.forNumber(startFromCase_); + } + + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private volatile Object tableName_; + /** + * + * + *
+   * Required. The unique name of the table from which to read a change stream.
+   * Values are of the form
+   * `projects/<project>/instances/<instance>/tables/<table>`.
+   * Change streaming must be enabled on the table.
+   * 
+ * + * + * string table_name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The tableName. + */ + @Override + public String getTableName() { + Object ref = tableName_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + tableName_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The unique name of the table from which to read a change stream.
+   * Values are of the form
+   * `projects/<project>/instances/<instance>/tables/<table>`.
+   * Change streaming must be enabled on the table.
+   * 
+ * + * + * string table_name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for tableName. + */ + @Override + public com.google.protobuf.ByteString getTableNameBytes() { + Object ref = tableName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); + tableName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int APP_PROFILE_ID_FIELD_NUMBER = 2; + private volatile Object appProfileId_; + /** + * + * + *
+   * This value specifies routing for replication. If not specified, the
+   * "default" application profile will be used.
+   * Single cluster routing must be configured on the profile.
+   * 
+ * + * string app_profile_id = 2; + * + * @return The appProfileId. + */ + @Override + public String getAppProfileId() { + Object ref = appProfileId_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + appProfileId_ = s; + return s; + } + } + /** + * + * + *
+   * This value specifies routing for replication. If not specified, the
+   * "default" application profile will be used.
+   * Single cluster routing must be configured on the profile.
+   * 
+ * + * string app_profile_id = 2; + * + * @return The bytes for appProfileId. + */ + @Override + public com.google.protobuf.ByteString getAppProfileIdBytes() { + Object ref = appProfileId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); + appProfileId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PARTITION_FIELD_NUMBER = 3; + private StreamPartition partition_; + /** + * + * + *
+   * The partition to read changes from.
+   * 
+ * + * .google.bigtable.v2.StreamPartition partition = 3; + * + * @return Whether the partition field is set. + */ + @Override + public boolean hasPartition() { + return partition_ != null; + } + /** + * + * + *
+   * The partition to read changes from.
+   * 
+ * + * .google.bigtable.v2.StreamPartition partition = 3; + * + * @return The partition. + */ + @Override + public StreamPartition getPartition() { + return partition_ == null ? StreamPartition.getDefaultInstance() : partition_; + } + /** + * + * + *
+   * The partition to read changes from.
+   * 
+ * + * .google.bigtable.v2.StreamPartition partition = 3; + */ + @Override + public StreamPartitionOrBuilder getPartitionOrBuilder() { + return getPartition(); + } + + public static final int START_TIME_FIELD_NUMBER = 4; + /** + * + * + *
+   * Start reading the stream at the specified timestamp. This timestamp must
+   * be within the change stream retention period, less than or equal to the
+   * current time, and after change stream creation, whichever is greater.
+   * This value is inclusive and will be truncated to microsecond granularity.
+   * 
+ * + * .google.protobuf.Timestamp start_time = 4; + * + * @return Whether the startTime field is set. + */ + @Override + public boolean hasStartTime() { + return startFromCase_ == 4; + } + /** + * + * + *
+   * Start reading the stream at the specified timestamp. This timestamp must
+   * be within the change stream retention period, less than or equal to the
+   * current time, and after change stream creation, whichever is greater.
+   * This value is inclusive and will be truncated to microsecond granularity.
+   * 
+ * + * .google.protobuf.Timestamp start_time = 4; + * + * @return The startTime. + */ + @Override + public com.google.protobuf.Timestamp getStartTime() { + if (startFromCase_ == 4) { + return (com.google.protobuf.Timestamp) startFrom_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + /** + * + * + *
+   * Start reading the stream at the specified timestamp. This timestamp must
+   * be within the change stream retention period, less than or equal to the
+   * current time, and after change stream creation, whichever is greater.
+   * This value is inclusive and will be truncated to microsecond granularity.
+   * 
+ * + * .google.protobuf.Timestamp start_time = 4; + */ + @Override + public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { + if (startFromCase_ == 4) { + return (com.google.protobuf.Timestamp) startFrom_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + + public static final int CONTINUATION_TOKENS_FIELD_NUMBER = 6; + /** + * + * + *
+   * Tokens that describe how to resume reading a stream where reading
+   * previously left off. If specified, changes will be read starting at the
+   * position. Tokens are delivered on the stream as part of `Heartbeat`
+   * and `CloseStream` messages.
+   * If a single token is provided, the token’s partition must exactly match
+   * the request’s partition. If multiple tokens are provided, as in the case
+   * of a partition merge, the union of the token partitions must exactly
+   * cover the request’s partition. Otherwise, INVALID_ARGUMENT will be
+   * returned.
+   * 
+ * + * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; + * + * @return Whether the continuationTokens field is set. + */ + @Override + public boolean hasContinuationTokens() { + return startFromCase_ == 6; + } + /** + * + * + *
+   * Tokens that describe how to resume reading a stream where reading
+   * previously left off. If specified, changes will be read starting at the
+   * position. Tokens are delivered on the stream as part of `Heartbeat`
+   * and `CloseStream` messages.
+   * If a single token is provided, the token’s partition must exactly match
+   * the request’s partition. If multiple tokens are provided, as in the case
+   * of a partition merge, the union of the token partitions must exactly
+   * cover the request’s partition. Otherwise, INVALID_ARGUMENT will be
+   * returned.
+   * 
+ * + * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; + * + * @return The continuationTokens. + */ + @Override + public StreamContinuationTokens getContinuationTokens() { + if (startFromCase_ == 6) { + return (StreamContinuationTokens) startFrom_; + } + return StreamContinuationTokens.getDefaultInstance(); + } + /** + * + * + *
+   * Tokens that describe how to resume reading a stream where reading
+   * previously left off. If specified, changes will be read starting at the
+   * position. Tokens are delivered on the stream as part of `Heartbeat`
+   * and `CloseStream` messages.
+   * If a single token is provided, the token’s partition must exactly match
+   * the request’s partition. If multiple tokens are provided, as in the case
+   * of a partition merge, the union of the token partitions must exactly
+   * cover the request’s partition. Otherwise, INVALID_ARGUMENT will be
+   * returned.
+   * 
+ * + * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; + */ + @Override + public StreamContinuationTokensOrBuilder getContinuationTokensOrBuilder() { + if (startFromCase_ == 6) { + return (StreamContinuationTokens) startFrom_; + } + return StreamContinuationTokens.getDefaultInstance(); + } + + public static final int END_TIME_FIELD_NUMBER = 5; + private com.google.protobuf.Timestamp endTime_; + /** + * + * + *
+   * If specified, OK will be returned when the stream advances beyond
+   * this time. Otherwise, changes will be continuously delivered on the stream.
+   * This value is inclusive and will be truncated to microsecond granularity.
+   * 
+ * + * .google.protobuf.Timestamp end_time = 5; + * + * @return Whether the endTime field is set. + */ + @Override + public boolean hasEndTime() { + return endTime_ != null; + } + /** + * + * + *
+   * If specified, OK will be returned when the stream advances beyond
+   * this time. Otherwise, changes will be continuously delivered on the stream.
+   * This value is inclusive and will be truncated to microsecond granularity.
+   * 
+ * + * .google.protobuf.Timestamp end_time = 5; + * + * @return The endTime. + */ + @Override + public com.google.protobuf.Timestamp getEndTime() { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + /** + * + * + *
+   * If specified, OK will be returned when the stream advances beyond
+   * this time. Otherwise, changes will be continuously delivered on the stream.
+   * This value is inclusive and will be truncated to microsecond granularity.
+   * 
+ * + * .google.protobuf.Timestamp end_time = 5; + */ + @Override + public com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder() { + return getEndTime(); + } + + public static final int HEARTBEAT_DURATION_FIELD_NUMBER = 7; + private com.google.protobuf.Duration heartbeatDuration_; + /** + * + * + *
+   * If specified, the duration between `Heartbeat` messages on the stream.
+   * Otherwise, defaults to 5 seconds.
+   * 
+ * + * .google.protobuf.Duration heartbeat_duration = 7; + * + * @return Whether the heartbeatDuration field is set. + */ + @Override + public boolean hasHeartbeatDuration() { + return heartbeatDuration_ != null; + } + /** + * + * + *
+   * If specified, the duration between `Heartbeat` messages on the stream.
+   * Otherwise, defaults to 5 seconds.
+   * 
+ * + * .google.protobuf.Duration heartbeat_duration = 7; + * + * @return The heartbeatDuration. + */ + @Override + public com.google.protobuf.Duration getHeartbeatDuration() { + return heartbeatDuration_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : heartbeatDuration_; + } + /** + * + * + *
+   * If specified, the duration between `Heartbeat` messages on the stream.
+   * Otherwise, defaults to 5 seconds.
+   * 
+ * + * .google.protobuf.Duration heartbeat_duration = 7; + */ + @Override + public com.google.protobuf.DurationOrBuilder getHeartbeatDurationOrBuilder() { + return getHeartbeatDuration(); + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(tableName_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, tableName_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(appProfileId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, appProfileId_); + } + if (partition_ != null) { + output.writeMessage(3, getPartition()); + } + if (startFromCase_ == 4) { + output.writeMessage(4, (com.google.protobuf.Timestamp) startFrom_); + } + if (endTime_ != null) { + output.writeMessage(5, getEndTime()); + } + if (startFromCase_ == 6) { + output.writeMessage(6, (StreamContinuationTokens) startFrom_); + } + if (heartbeatDuration_ != null) { + output.writeMessage(7, getHeartbeatDuration()); + } + unknownFields.writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(tableName_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, tableName_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(appProfileId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, appProfileId_); + } + if (partition_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getPartition()); + } + if (startFromCase_ == 4) { + size += + 
com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.protobuf.Timestamp) startFrom_); + } + if (endTime_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getEndTime()); + } + if (startFromCase_ == 6) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 6, (StreamContinuationTokens) startFrom_); + } + if (heartbeatDuration_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getHeartbeatDuration()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof ReadChangeStreamRequest)) { + return super.equals(obj); + } + ReadChangeStreamRequest other = (ReadChangeStreamRequest) obj; + + if (!getTableName().equals(other.getTableName())) return false; + if (!getAppProfileId().equals(other.getAppProfileId())) return false; + if (hasPartition() != other.hasPartition()) return false; + if (hasPartition()) { + if (!getPartition().equals(other.getPartition())) return false; + } + if (hasEndTime() != other.hasEndTime()) return false; + if (hasEndTime()) { + if (!getEndTime().equals(other.getEndTime())) return false; + } + if (hasHeartbeatDuration() != other.hasHeartbeatDuration()) return false; + if (hasHeartbeatDuration()) { + if (!getHeartbeatDuration().equals(other.getHeartbeatDuration())) return false; + } + if (!getStartFromCase().equals(other.getStartFromCase())) return false; + switch (startFromCase_) { + case 4: + if (!getStartTime().equals(other.getStartTime())) return false; + break; + case 6: + if (!getContinuationTokens().equals(other.getContinuationTokens())) return false; + break; + case 0: + default: + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash 
= (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + hash = (37 * hash) + APP_PROFILE_ID_FIELD_NUMBER; + hash = (53 * hash) + getAppProfileId().hashCode(); + if (hasPartition()) { + hash = (37 * hash) + PARTITION_FIELD_NUMBER; + hash = (53 * hash) + getPartition().hashCode(); + } + if (hasEndTime()) { + hash = (37 * hash) + END_TIME_FIELD_NUMBER; + hash = (53 * hash) + getEndTime().hashCode(); + } + if (hasHeartbeatDuration()) { + hash = (37 * hash) + HEARTBEAT_DURATION_FIELD_NUMBER; + hash = (53 * hash) + getHeartbeatDuration().hashCode(); + } + switch (startFromCase_) { + case 4: + hash = (37 * hash) + START_TIME_FIELD_NUMBER; + hash = (53 * hash) + getStartTime().hashCode(); + break; + case 6: + hash = (37 * hash) + CONTINUATION_TOKENS_FIELD_NUMBER; + hash = (53 * hash) + getContinuationTokens().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static ReadChangeStreamRequest parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static ReadChangeStreamRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static ReadChangeStreamRequest parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static ReadChangeStreamRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static ReadChangeStreamRequest parseFrom(byte[] 
data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static ReadChangeStreamRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static ReadChangeStreamRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static ReadChangeStreamRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static ReadChangeStreamRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static ReadChangeStreamRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static ReadChangeStreamRequest parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static ReadChangeStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + 
return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(ReadChangeStreamRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * NOTE: This API is not generally available. Users must be allowlisted.
+   * Request message for Bigtable.ReadChangeStream.
+   * 
+ * + * Protobuf type {@code google.bigtable.v2.ReadChangeStreamRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.bigtable.v2.ReadChangeStreamRequest) + ReadChangeStreamRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return BigtableProto.internal_static_google_bigtable_v2_ReadChangeStreamRequest_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized(ReadChangeStreamRequest.class, Builder.class); + } + + // Construct using com.google.bigtable.v2.ReadChangeStreamRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @Override + public Builder clear() { + super.clear(); + tableName_ = ""; + + appProfileId_ = ""; + + if (partitionBuilder_ == null) { + partition_ = null; + } else { + partition_ = null; + partitionBuilder_ = null; + } + if (endTimeBuilder_ == null) { + endTime_ = null; + } else { + endTime_ = null; + endTimeBuilder_ = null; + } + if (heartbeatDurationBuilder_ == null) { + heartbeatDuration_ = null; + } else { + heartbeatDuration_ = null; + heartbeatDurationBuilder_ = null; + } + startFromCase_ = 0; + startFrom_ = null; + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return BigtableProto.internal_static_google_bigtable_v2_ReadChangeStreamRequest_descriptor; + } + + @Override + public ReadChangeStreamRequest getDefaultInstanceForType() { + return 
ReadChangeStreamRequest.getDefaultInstance(); + } + + @Override + public ReadChangeStreamRequest build() { + ReadChangeStreamRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public ReadChangeStreamRequest buildPartial() { + ReadChangeStreamRequest result = new ReadChangeStreamRequest(this); + result.tableName_ = tableName_; + result.appProfileId_ = appProfileId_; + if (partitionBuilder_ == null) { + result.partition_ = partition_; + } else { + result.partition_ = partitionBuilder_.build(); + } + if (startFromCase_ == 4) { + if (startTimeBuilder_ == null) { + result.startFrom_ = startFrom_; + } else { + result.startFrom_ = startTimeBuilder_.build(); + } + } + if (startFromCase_ == 6) { + if (continuationTokensBuilder_ == null) { + result.startFrom_ = startFrom_; + } else { + result.startFrom_ = continuationTokensBuilder_.build(); + } + } + if (endTimeBuilder_ == null) { + result.endTime_ = endTime_; + } else { + result.endTime_ = endTimeBuilder_.build(); + } + if (heartbeatDurationBuilder_ == null) { + result.heartbeatDuration_ = heartbeatDuration_; + } else { + result.heartbeatDuration_ = heartbeatDurationBuilder_.build(); + } + result.startFromCase_ = startFromCase_; + onBuilt(); + return result; + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + return super.setRepeatedField(field, 
index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof ReadChangeStreamRequest) { + return mergeFrom((ReadChangeStreamRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(ReadChangeStreamRequest other) { + if (other == ReadChangeStreamRequest.getDefaultInstance()) return this; + if (!other.getTableName().isEmpty()) { + tableName_ = other.tableName_; + onChanged(); + } + if (!other.getAppProfileId().isEmpty()) { + appProfileId_ = other.appProfileId_; + onChanged(); + } + if (other.hasPartition()) { + mergePartition(other.getPartition()); + } + if (other.hasEndTime()) { + mergeEndTime(other.getEndTime()); + } + if (other.hasHeartbeatDuration()) { + mergeHeartbeatDuration(other.getHeartbeatDuration()); + } + switch (other.getStartFromCase()) { + case START_TIME: + { + mergeStartTime(other.getStartTime()); + break; + } + case CONTINUATION_TOKENS: + { + mergeContinuationTokens(other.getContinuationTokens()); + break; + } + case STARTFROM_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + ReadChangeStreamRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (ReadChangeStreamRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } 
+ + private int startFromCase_ = 0; + private Object startFrom_; + + public StartFromCase getStartFromCase() { + return StartFromCase.forNumber(startFromCase_); + } + + public Builder clearStartFrom() { + startFromCase_ = 0; + startFrom_ = null; + onChanged(); + return this; + } + + private Object tableName_ = ""; + /** + * + * + *
+     * Required. The unique name of the table from which to read a change stream.
+     * Values are of the form
+     * `projects/<project>/instances/<instance>/tables/<table>`.
+     * Change streaming must be enabled on the table.
+     * 
+ * + * + * string table_name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The tableName. + */ + public String getTableName() { + Object ref = tableName_; + if (!(ref instanceof String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + tableName_ = s; + return s; + } else { + return (String) ref; + } + } + /** + * + * + *
+     * Required. The unique name of the table from which to read a change stream.
+     * Values are of the form
+     * `projects/<project>/instances/<instance>/tables/<table>`.
+     * Change streaming must be enabled on the table.
+     * 
+ * + * + * string table_name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for tableName. + */ + public com.google.protobuf.ByteString getTableNameBytes() { + Object ref = tableName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + tableName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The unique name of the table from which to read a change stream.
+     * Values are of the form
+     * `projects/<project>/instances/<instance>/tables/<table>`.
+     * Change streaming must be enabled on the table.
+     * 
+ * + * + * string table_name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The tableName to set. + * @return This builder for chaining. + */ + public Builder setTableName(String value) { + if (value == null) { + throw new NullPointerException(); + } + + tableName_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The unique name of the table from which to read a change stream.
+     * Values are of the form
+     * `projects/<project>/instances/<instance>/tables/<table>`.
+     * Change streaming must be enabled on the table.
+     * 
+ * + * + * string table_name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearTableName() { + + tableName_ = getDefaultInstance().getTableName(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The unique name of the table from which to read a change stream.
+     * Values are of the form
+     * `projects/<project>/instances/<instance>/tables/<table>`.
+     * Change streaming must be enabled on the table.
+     * 
+ * + * + * string table_name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for tableName to set. + * @return This builder for chaining. + */ + public Builder setTableNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + tableName_ = value; + onChanged(); + return this; + } + + private Object appProfileId_ = ""; + /** + * + * + *
+     * This value specifies routing for replication. If not specified, the
+     * "default" application profile will be used.
+     * Single cluster routing must be configured on the profile.
+     * 
+ * + * string app_profile_id = 2; + * + * @return The appProfileId. + */ + public String getAppProfileId() { + Object ref = appProfileId_; + if (!(ref instanceof String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + appProfileId_ = s; + return s; + } else { + return (String) ref; + } + } + /** + * + * + *
+     * This value specifies routing for replication. If not specified, the
+     * "default" application profile will be used.
+     * Single cluster routing must be configured on the profile.
+     * 
+ * + * string app_profile_id = 2; + * + * @return The bytes for appProfileId. + */ + public com.google.protobuf.ByteString getAppProfileIdBytes() { + Object ref = appProfileId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + appProfileId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * This value specifies routing for replication. If not specified, the
+     * "default" application profile will be used.
+     * Single cluster routing must be configured on the profile.
+     * 
+ * + * string app_profile_id = 2; + * + * @param value The appProfileId to set. + * @return This builder for chaining. + */ + public Builder setAppProfileId(String value) { + if (value == null) { + throw new NullPointerException(); + } + + appProfileId_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * This value specifies routing for replication. If not specified, the
+     * "default" application profile will be used.
+     * Single cluster routing must be configured on the profile.
+     * 
+ * + * string app_profile_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearAppProfileId() { + + appProfileId_ = getDefaultInstance().getAppProfileId(); + onChanged(); + return this; + } + /** + * + * + *
+     * This value specifies routing for replication. If not specified, the
+     * "default" application profile will be used.
+     * Single cluster routing must be configured on the profile.
+     * 
+ * + * string app_profile_id = 2; + * + * @param value The bytes for appProfileId to set. + * @return This builder for chaining. + */ + public Builder setAppProfileIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + appProfileId_ = value; + onChanged(); + return this; + } + + private StreamPartition partition_; + private com.google.protobuf.SingleFieldBuilderV3< + StreamPartition, StreamPartition.Builder, StreamPartitionOrBuilder> + partitionBuilder_; + /** + * + * + *
+     * The partition to read changes from.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 3; + * + * @return Whether the partition field is set. + */ + public boolean hasPartition() { + return partitionBuilder_ != null || partition_ != null; + } + /** + * + * + *
+     * The partition to read changes from.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 3; + * + * @return The partition. + */ + public StreamPartition getPartition() { + if (partitionBuilder_ == null) { + return partition_ == null ? StreamPartition.getDefaultInstance() : partition_; + } else { + return partitionBuilder_.getMessage(); + } + } + /** + * + * + *
+     * The partition to read changes from.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 3; + */ + public Builder setPartition(StreamPartition value) { + if (partitionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + partition_ = value; + onChanged(); + } else { + partitionBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * The partition to read changes from.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 3; + */ + public Builder setPartition(StreamPartition.Builder builderForValue) { + if (partitionBuilder_ == null) { + partition_ = builderForValue.build(); + onChanged(); + } else { + partitionBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * The partition to read changes from.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 3; + */ + public Builder mergePartition(StreamPartition value) { + if (partitionBuilder_ == null) { + if (partition_ != null) { + partition_ = StreamPartition.newBuilder(partition_).mergeFrom(value).buildPartial(); + } else { + partition_ = value; + } + onChanged(); + } else { + partitionBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * The partition to read changes from.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 3; + */ + public Builder clearPartition() { + if (partitionBuilder_ == null) { + partition_ = null; + onChanged(); + } else { + partition_ = null; + partitionBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * The partition to read changes from.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 3; + */ + public StreamPartition.Builder getPartitionBuilder() { + + onChanged(); + return getPartitionFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * The partition to read changes from.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 3; + */ + public StreamPartitionOrBuilder getPartitionOrBuilder() { + if (partitionBuilder_ != null) { + return partitionBuilder_.getMessageOrBuilder(); + } else { + return partition_ == null ? StreamPartition.getDefaultInstance() : partition_; + } + } + /** + * + * + *
+     * The partition to read changes from.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + StreamPartition, StreamPartition.Builder, StreamPartitionOrBuilder> + getPartitionFieldBuilder() { + if (partitionBuilder_ == null) { + partitionBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + StreamPartition, StreamPartition.Builder, StreamPartitionOrBuilder>( + getPartition(), getParentForChildren(), isClean()); + partition_ = null; + } + return partitionBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + startTimeBuilder_; + /** + * + * + *
+     * Start reading the stream at the specified timestamp. This timestamp must
+     * be within the change stream retention period, less than or equal to the
+     * current time, and after change stream creation, whichever is greater.
+     * This value is inclusive and will be truncated to microsecond granularity.
+     * 
+ * + * .google.protobuf.Timestamp start_time = 4; + * + * @return Whether the startTime field is set. + */ + @Override + public boolean hasStartTime() { + return startFromCase_ == 4; + } + /** + * + * + *
+     * Start reading the stream at the specified timestamp. This timestamp must
+     * be within the change stream retention period, less than or equal to the
+     * current time, and after change stream creation, whichever is greater.
+     * This value is inclusive and will be truncated to microsecond granularity.
+     * 
+ * + * .google.protobuf.Timestamp start_time = 4; + * + * @return The startTime. + */ + @Override + public com.google.protobuf.Timestamp getStartTime() { + if (startTimeBuilder_ == null) { + if (startFromCase_ == 4) { + return (com.google.protobuf.Timestamp) startFrom_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } else { + if (startFromCase_ == 4) { + return startTimeBuilder_.getMessage(); + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + } + /** + * + * + *
+     * Start reading the stream at the specified timestamp. This timestamp must
+     * be within the change stream retention period, less than or equal to the
+     * current time, and after change stream creation, whichever is greater.
+     * This value is inclusive and will be truncated to microsecond granularity.
+     * 
+ * + * .google.protobuf.Timestamp start_time = 4; + */ + public Builder setStartTime(com.google.protobuf.Timestamp value) { + if (startTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + startFrom_ = value; + onChanged(); + } else { + startTimeBuilder_.setMessage(value); + } + startFromCase_ = 4; + return this; + } + /** + * + * + *
+     * Start reading the stream at the specified timestamp. This timestamp must
+     * be within the change stream retention period, less than or equal to the
+     * current time, and after change stream creation, whichever is greater.
+     * This value is inclusive and will be truncated to microsecond granularity.
+     * 
+ * + * .google.protobuf.Timestamp start_time = 4; + */ + public Builder setStartTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (startTimeBuilder_ == null) { + startFrom_ = builderForValue.build(); + onChanged(); + } else { + startTimeBuilder_.setMessage(builderForValue.build()); + } + startFromCase_ = 4; + return this; + } + /** + * + * + *
+     * Start reading the stream at the specified timestamp. This timestamp must
+     * be within the change stream retention period, less than or equal to the
+     * current time, and after change stream creation, whichever is greater.
+     * This value is inclusive and will be truncated to microsecond granularity.
+     * 
+ * + * .google.protobuf.Timestamp start_time = 4; + */ + public Builder mergeStartTime(com.google.protobuf.Timestamp value) { + if (startTimeBuilder_ == null) { + if (startFromCase_ == 4 + && startFrom_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + startFrom_ = + com.google.protobuf.Timestamp.newBuilder((com.google.protobuf.Timestamp) startFrom_) + .mergeFrom(value) + .buildPartial(); + } else { + startFrom_ = value; + } + onChanged(); + } else { + if (startFromCase_ == 4) { + startTimeBuilder_.mergeFrom(value); + } else { + startTimeBuilder_.setMessage(value); + } + } + startFromCase_ = 4; + return this; + } + /** + * + * + *
+     * Start reading the stream at the specified timestamp. This timestamp must
+     * be within the change stream retention period, less than or equal to the
+     * current time, and after change stream creation, whichever is greater.
+     * This value is inclusive and will be truncated to microsecond granularity.
+     * 
+ * + * .google.protobuf.Timestamp start_time = 4; + */ + public Builder clearStartTime() { + if (startTimeBuilder_ == null) { + if (startFromCase_ == 4) { + startFromCase_ = 0; + startFrom_ = null; + onChanged(); + } + } else { + if (startFromCase_ == 4) { + startFromCase_ = 0; + startFrom_ = null; + } + startTimeBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Start reading the stream at the specified timestamp. This timestamp must
+     * be within the change stream retention period, less than or equal to the
+     * current time, and after change stream creation, whichever is greater.
+     * This value is inclusive and will be truncated to microsecond granularity.
+     * 
+ * + * .google.protobuf.Timestamp start_time = 4; + */ + public com.google.protobuf.Timestamp.Builder getStartTimeBuilder() { + return getStartTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Start reading the stream at the specified timestamp. This timestamp must
+     * be within the change stream retention period, less than or equal to the
+     * current time, and after change stream creation, whichever is greater.
+     * This value is inclusive and will be truncated to microsecond granularity.
+     * 
+ * + * .google.protobuf.Timestamp start_time = 4; + */ + @Override + public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { + if ((startFromCase_ == 4) && (startTimeBuilder_ != null)) { + return startTimeBuilder_.getMessageOrBuilder(); + } else { + if (startFromCase_ == 4) { + return (com.google.protobuf.Timestamp) startFrom_; + } + return com.google.protobuf.Timestamp.getDefaultInstance(); + } + } + /** + * + * + *
+     * Start reading the stream at the specified timestamp. This timestamp must
+     * be within the change stream retention period, less than or equal to the
+     * current time, and after change stream creation, whichever is greater.
+     * This value is inclusive and will be truncated to microsecond granularity.
+     * 
+ * + * .google.protobuf.Timestamp start_time = 4; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getStartTimeFieldBuilder() { + if (startTimeBuilder_ == null) { + if (!(startFromCase_ == 4)) { + startFrom_ = com.google.protobuf.Timestamp.getDefaultInstance(); + } + startTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + (com.google.protobuf.Timestamp) startFrom_, getParentForChildren(), isClean()); + startFrom_ = null; + } + startFromCase_ = 4; + onChanged(); + ; + return startTimeBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + StreamContinuationTokens, + StreamContinuationTokens.Builder, + StreamContinuationTokensOrBuilder> + continuationTokensBuilder_; + /** + * + * + *
+     * Tokens that describe how to resume reading a stream where reading
+     * previously left off. If specified, changes will be read starting at the
+     * the position. Tokens are delivered on the stream as part of `Heartbeat`
+     * and `CloseStream` messages.
+     * If a single token is provided, the token’s partition must exactly match
+     * the request’s partition. If multiple tokens are provided, as in the case
+     * of a partition merge, the union of the token partitions must exactly
+     * cover the request’s partition. Otherwise, INVALID_ARGUMENT will be
+     * returned.
+     * 
+ * + * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; + * + * @return Whether the continuationTokens field is set. + */ + @Override + public boolean hasContinuationTokens() { + return startFromCase_ == 6; + } + /** + * + * + *
+     * Tokens that describe how to resume reading a stream where reading
+     * previously left off. If specified, changes will be read starting at the
+     * the position. Tokens are delivered on the stream as part of `Heartbeat`
+     * and `CloseStream` messages.
+     * If a single token is provided, the token’s partition must exactly match
+     * the request’s partition. If multiple tokens are provided, as in the case
+     * of a partition merge, the union of the token partitions must exactly
+     * cover the request’s partition. Otherwise, INVALID_ARGUMENT will be
+     * returned.
+     * 
+ * + * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; + * + * @return The continuationTokens. + */ + @Override + public StreamContinuationTokens getContinuationTokens() { + if (continuationTokensBuilder_ == null) { + if (startFromCase_ == 6) { + return (StreamContinuationTokens) startFrom_; + } + return StreamContinuationTokens.getDefaultInstance(); + } else { + if (startFromCase_ == 6) { + return continuationTokensBuilder_.getMessage(); + } + return StreamContinuationTokens.getDefaultInstance(); + } + } + /** + * + * + *
+     * Tokens that describe how to resume reading a stream where reading
+     * previously left off. If specified, changes will be read starting at the
+     * position. Tokens are delivered on the stream as part of `Heartbeat`
+     * and `CloseStream` messages.
+     * If a single token is provided, the token’s partition must exactly match
+     * the request’s partition. If multiple tokens are provided, as in the case
+     * of a partition merge, the union of the token partitions must exactly
+     * cover the request’s partition. Otherwise, INVALID_ARGUMENT will be
+     * returned.
+     * 
+ * + * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; + */ + public Builder setContinuationTokens(StreamContinuationTokens value) { + if (continuationTokensBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + startFrom_ = value; + onChanged(); + } else { + continuationTokensBuilder_.setMessage(value); + } + startFromCase_ = 6; + return this; + } + /** + * + * + *
+     * Tokens that describe how to resume reading a stream where reading
+     * previously left off. If specified, changes will be read starting at the
+     * position. Tokens are delivered on the stream as part of `Heartbeat`
+     * and `CloseStream` messages.
+     * If a single token is provided, the token’s partition must exactly match
+     * the request’s partition. If multiple tokens are provided, as in the case
+     * of a partition merge, the union of the token partitions must exactly
+     * cover the request’s partition. Otherwise, INVALID_ARGUMENT will be
+     * returned.
+     * 
+ * + * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; + */ + public Builder setContinuationTokens(StreamContinuationTokens.Builder builderForValue) { + if (continuationTokensBuilder_ == null) { + startFrom_ = builderForValue.build(); + onChanged(); + } else { + continuationTokensBuilder_.setMessage(builderForValue.build()); + } + startFromCase_ = 6; + return this; + } + /** + * + * + *
+     * Tokens that describe how to resume reading a stream where reading
+     * previously left off. If specified, changes will be read starting at the
+     * position. Tokens are delivered on the stream as part of `Heartbeat`
+     * and `CloseStream` messages.
+     * If a single token is provided, the token’s partition must exactly match
+     * the request’s partition. If multiple tokens are provided, as in the case
+     * of a partition merge, the union of the token partitions must exactly
+     * cover the request’s partition. Otherwise, INVALID_ARGUMENT will be
+     * returned.
+     * 
+ * + * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; + */ + public Builder mergeContinuationTokens(StreamContinuationTokens value) { + if (continuationTokensBuilder_ == null) { + if (startFromCase_ == 6 && startFrom_ != StreamContinuationTokens.getDefaultInstance()) { + startFrom_ = + StreamContinuationTokens.newBuilder((StreamContinuationTokens) startFrom_) + .mergeFrom(value) + .buildPartial(); + } else { + startFrom_ = value; + } + onChanged(); + } else { + if (startFromCase_ == 6) { + continuationTokensBuilder_.mergeFrom(value); + } else { + continuationTokensBuilder_.setMessage(value); + } + } + startFromCase_ = 6; + return this; + } + /** + * + * + *
+     * Tokens that describe how to resume reading a stream where reading
+     * previously left off. If specified, changes will be read starting at the
+     * position. Tokens are delivered on the stream as part of `Heartbeat`
+     * and `CloseStream` messages.
+     * If a single token is provided, the token’s partition must exactly match
+     * the request’s partition. If multiple tokens are provided, as in the case
+     * of a partition merge, the union of the token partitions must exactly
+     * cover the request’s partition. Otherwise, INVALID_ARGUMENT will be
+     * returned.
+     * 
+ * + * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; + */ + public Builder clearContinuationTokens() { + if (continuationTokensBuilder_ == null) { + if (startFromCase_ == 6) { + startFromCase_ = 0; + startFrom_ = null; + onChanged(); + } + } else { + if (startFromCase_ == 6) { + startFromCase_ = 0; + startFrom_ = null; + } + continuationTokensBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Tokens that describe how to resume reading a stream where reading
+     * previously left off. If specified, changes will be read starting at the
+     * position. Tokens are delivered on the stream as part of `Heartbeat`
+     * and `CloseStream` messages.
+     * If a single token is provided, the token’s partition must exactly match
+     * the request’s partition. If multiple tokens are provided, as in the case
+     * of a partition merge, the union of the token partitions must exactly
+     * cover the request’s partition. Otherwise, INVALID_ARGUMENT will be
+     * returned.
+     * 
+ * + * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; + */ + public StreamContinuationTokens.Builder getContinuationTokensBuilder() { + return getContinuationTokensFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Tokens that describe how to resume reading a stream where reading
+     * previously left off. If specified, changes will be read starting at the
+     * position. Tokens are delivered on the stream as part of `Heartbeat`
+     * and `CloseStream` messages.
+     * If a single token is provided, the token’s partition must exactly match
+     * the request’s partition. If multiple tokens are provided, as in the case
+     * of a partition merge, the union of the token partitions must exactly
+     * cover the request’s partition. Otherwise, INVALID_ARGUMENT will be
+     * returned.
+     * 
+ * + * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; + */ + @Override + public StreamContinuationTokensOrBuilder getContinuationTokensOrBuilder() { + if ((startFromCase_ == 6) && (continuationTokensBuilder_ != null)) { + return continuationTokensBuilder_.getMessageOrBuilder(); + } else { + if (startFromCase_ == 6) { + return (StreamContinuationTokens) startFrom_; + } + return StreamContinuationTokens.getDefaultInstance(); + } + } + /** + * + * + *
+     * Tokens that describe how to resume reading a stream where reading
+     * previously left off. If specified, changes will be read starting at the
+     * position. Tokens are delivered on the stream as part of `Heartbeat`
+     * and `CloseStream` messages.
+     * If a single token is provided, the token’s partition must exactly match
+     * the request’s partition. If multiple tokens are provided, as in the case
+     * of a partition merge, the union of the token partitions must exactly
+     * cover the request’s partition. Otherwise, INVALID_ARGUMENT will be
+     * returned.
+     * 
+ * + * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; + */ + private com.google.protobuf.SingleFieldBuilderV3< + StreamContinuationTokens, + StreamContinuationTokens.Builder, + StreamContinuationTokensOrBuilder> + getContinuationTokensFieldBuilder() { + if (continuationTokensBuilder_ == null) { + if (!(startFromCase_ == 6)) { + startFrom_ = StreamContinuationTokens.getDefaultInstance(); + } + continuationTokensBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + StreamContinuationTokens, + StreamContinuationTokens.Builder, + StreamContinuationTokensOrBuilder>( + (StreamContinuationTokens) startFrom_, getParentForChildren(), isClean()); + startFrom_ = null; + } + startFromCase_ = 6; + onChanged(); + ; + return continuationTokensBuilder_; + } + + private com.google.protobuf.Timestamp endTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + endTimeBuilder_; + /** + * + * + *
+     * If specified, OK will be returned when the stream advances beyond
+     * this time. Otherwise, changes will be continuously delivered on the stream.
+     * This value is inclusive and will be truncated to microsecond granularity.
+     * 
+ * + * .google.protobuf.Timestamp end_time = 5; + * + * @return Whether the endTime field is set. + */ + public boolean hasEndTime() { + return endTimeBuilder_ != null || endTime_ != null; + } + /** + * + * + *
+     * If specified, OK will be returned when the stream advances beyond
+     * this time. Otherwise, changes will be continuously delivered on the stream.
+     * This value is inclusive and will be truncated to microsecond granularity.
+     * 
+ * + * .google.protobuf.Timestamp end_time = 5; + * + * @return The endTime. + */ + public com.google.protobuf.Timestamp getEndTime() { + if (endTimeBuilder_ == null) { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } else { + return endTimeBuilder_.getMessage(); + } + } + /** + * + * + *
+     * If specified, OK will be returned when the stream advances beyond
+     * this time. Otherwise, changes will be continuously delivered on the stream.
+     * This value is inclusive and will be truncated to microsecond granularity.
+     * 
+ * + * .google.protobuf.Timestamp end_time = 5; + */ + public Builder setEndTime(com.google.protobuf.Timestamp value) { + if (endTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + endTime_ = value; + onChanged(); + } else { + endTimeBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * If specified, OK will be returned when the stream advances beyond
+     * this time. Otherwise, changes will be continuously delivered on the stream.
+     * This value is inclusive and will be truncated to microsecond granularity.
+     * 
+ * + * .google.protobuf.Timestamp end_time = 5; + */ + public Builder setEndTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (endTimeBuilder_ == null) { + endTime_ = builderForValue.build(); + onChanged(); + } else { + endTimeBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * If specified, OK will be returned when the stream advances beyond
+     * this time. Otherwise, changes will be continuously delivered on the stream.
+     * This value is inclusive and will be truncated to microsecond granularity.
+     * 
+ * + * .google.protobuf.Timestamp end_time = 5; + */ + public Builder mergeEndTime(com.google.protobuf.Timestamp value) { + if (endTimeBuilder_ == null) { + if (endTime_ != null) { + endTime_ = + com.google.protobuf.Timestamp.newBuilder(endTime_).mergeFrom(value).buildPartial(); + } else { + endTime_ = value; + } + onChanged(); + } else { + endTimeBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * If specified, OK will be returned when the stream advances beyond
+     * this time. Otherwise, changes will be continuously delivered on the stream.
+     * This value is inclusive and will be truncated to microsecond granularity.
+     * 
+ * + * .google.protobuf.Timestamp end_time = 5; + */ + public Builder clearEndTime() { + if (endTimeBuilder_ == null) { + endTime_ = null; + onChanged(); + } else { + endTime_ = null; + endTimeBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * If specified, OK will be returned when the stream advances beyond
+     * this time. Otherwise, changes will be continuously delivered on the stream.
+     * This value is inclusive and will be truncated to microsecond granularity.
+     * 
+ * + * .google.protobuf.Timestamp end_time = 5; + */ + public com.google.protobuf.Timestamp.Builder getEndTimeBuilder() { + + onChanged(); + return getEndTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * If specified, OK will be returned when the stream advances beyond
+     * this time. Otherwise, changes will be continuously delivered on the stream.
+     * This value is inclusive and will be truncated to microsecond granularity.
+     * 
+ * + * .google.protobuf.Timestamp end_time = 5; + */ + public com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder() { + if (endTimeBuilder_ != null) { + return endTimeBuilder_.getMessageOrBuilder(); + } else { + return endTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; + } + } + /** + * + * + *
+     * If specified, OK will be returned when the stream advances beyond
+     * this time. Otherwise, changes will be continuously delivered on the stream.
+     * This value is inclusive and will be truncated to microsecond granularity.
+     * 
+ * + * .google.protobuf.Timestamp end_time = 5; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getEndTimeFieldBuilder() { + if (endTimeBuilder_ == null) { + endTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getEndTime(), getParentForChildren(), isClean()); + endTime_ = null; + } + return endTimeBuilder_; + } + + private com.google.protobuf.Duration heartbeatDuration_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + heartbeatDurationBuilder_; + /** + * + * + *
+     * If specified, the duration between `Heartbeat` messages on the stream.
+     * Otherwise, defaults to 5 seconds.
+     * 
+ * + * .google.protobuf.Duration heartbeat_duration = 7; + * + * @return Whether the heartbeatDuration field is set. + */ + public boolean hasHeartbeatDuration() { + return heartbeatDurationBuilder_ != null || heartbeatDuration_ != null; + } + /** + * + * + *
+     * If specified, the duration between `Heartbeat` messages on the stream.
+     * Otherwise, defaults to 5 seconds.
+     * 
+ * + * .google.protobuf.Duration heartbeat_duration = 7; + * + * @return The heartbeatDuration. + */ + public com.google.protobuf.Duration getHeartbeatDuration() { + if (heartbeatDurationBuilder_ == null) { + return heartbeatDuration_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : heartbeatDuration_; + } else { + return heartbeatDurationBuilder_.getMessage(); + } + } + /** + * + * + *
+     * If specified, the duration between `Heartbeat` messages on the stream.
+     * Otherwise, defaults to 5 seconds.
+     * 
+ * + * .google.protobuf.Duration heartbeat_duration = 7; + */ + public Builder setHeartbeatDuration(com.google.protobuf.Duration value) { + if (heartbeatDurationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + heartbeatDuration_ = value; + onChanged(); + } else { + heartbeatDurationBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * If specified, the duration between `Heartbeat` messages on the stream.
+     * Otherwise, defaults to 5 seconds.
+     * 
+ * + * .google.protobuf.Duration heartbeat_duration = 7; + */ + public Builder setHeartbeatDuration(com.google.protobuf.Duration.Builder builderForValue) { + if (heartbeatDurationBuilder_ == null) { + heartbeatDuration_ = builderForValue.build(); + onChanged(); + } else { + heartbeatDurationBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * If specified, the duration between `Heartbeat` messages on the stream.
+     * Otherwise, defaults to 5 seconds.
+     * 
+ * + * .google.protobuf.Duration heartbeat_duration = 7; + */ + public Builder mergeHeartbeatDuration(com.google.protobuf.Duration value) { + if (heartbeatDurationBuilder_ == null) { + if (heartbeatDuration_ != null) { + heartbeatDuration_ = + com.google.protobuf.Duration.newBuilder(heartbeatDuration_) + .mergeFrom(value) + .buildPartial(); + } else { + heartbeatDuration_ = value; + } + onChanged(); + } else { + heartbeatDurationBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * If specified, the duration between `Heartbeat` messages on the stream.
+     * Otherwise, defaults to 5 seconds.
+     * 
+ * + * .google.protobuf.Duration heartbeat_duration = 7; + */ + public Builder clearHeartbeatDuration() { + if (heartbeatDurationBuilder_ == null) { + heartbeatDuration_ = null; + onChanged(); + } else { + heartbeatDuration_ = null; + heartbeatDurationBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * If specified, the duration between `Heartbeat` messages on the stream.
+     * Otherwise, defaults to 5 seconds.
+     * 
+ * + * .google.protobuf.Duration heartbeat_duration = 7; + */ + public com.google.protobuf.Duration.Builder getHeartbeatDurationBuilder() { + + onChanged(); + return getHeartbeatDurationFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * If specified, the duration between `Heartbeat` messages on the stream.
+     * Otherwise, defaults to 5 seconds.
+     * 
+ * + * .google.protobuf.Duration heartbeat_duration = 7; + */ + public com.google.protobuf.DurationOrBuilder getHeartbeatDurationOrBuilder() { + if (heartbeatDurationBuilder_ != null) { + return heartbeatDurationBuilder_.getMessageOrBuilder(); + } else { + return heartbeatDuration_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : heartbeatDuration_; + } + } + /** + * + * + *
+     * If specified, the duration between `Heartbeat` messages on the stream.
+     * Otherwise, defaults to 5 seconds.
+     * 
+ * + * .google.protobuf.Duration heartbeat_duration = 7; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + getHeartbeatDurationFieldBuilder() { + if (heartbeatDurationBuilder_ == null) { + heartbeatDurationBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder>( + getHeartbeatDuration(), getParentForChildren(), isClean()); + heartbeatDuration_ = null; + } + return heartbeatDurationBuilder_; + } + + @Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.bigtable.v2.ReadChangeStreamRequest) + } + + // @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadChangeStreamRequest) + private static final ReadChangeStreamRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new ReadChangeStreamRequest(); + } + + public static ReadChangeStreamRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public ReadChangeStreamRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ReadChangeStreamRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public 
ReadChangeStreamRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamRequestOrBuilder.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamRequestOrBuilder.java new file mode 100644 index 0000000000..c62293965a --- /dev/null +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamRequestOrBuilder.java @@ -0,0 +1,308 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/bigtable/v2/bigtable.proto + +package com.google.bigtable.v2; + +public interface ReadChangeStreamRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.bigtable.v2.ReadChangeStreamRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The unique name of the table from which to read a change stream.
+   * Values are of the form
+   * `projects/<project>/instances/<instance>/tables/<table>`.
+   * Change streaming must be enabled on the table.
+   * 
+ * + * + * string table_name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The tableName. + */ + String getTableName(); + /** + * + * + *
+   * Required. The unique name of the table from which to read a change stream.
+   * Values are of the form
+   * `projects/<project>/instances/<instance>/tables/<table>`.
+   * Change streaming must be enabled on the table.
+   * 
+ * + * + * string table_name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for tableName. + */ + com.google.protobuf.ByteString getTableNameBytes(); + + /** + * + * + *
+   * This value specifies routing for replication. If not specified, the
+   * "default" application profile will be used.
+   * Single cluster routing must be configured on the profile.
+   * 
+ * + * string app_profile_id = 2; + * + * @return The appProfileId. + */ + String getAppProfileId(); + /** + * + * + *
+   * This value specifies routing for replication. If not specified, the
+   * "default" application profile will be used.
+   * Single cluster routing must be configured on the profile.
+   * 
+ * + * string app_profile_id = 2; + * + * @return The bytes for appProfileId. + */ + com.google.protobuf.ByteString getAppProfileIdBytes(); + + /** + * + * + *
+   * The partition to read changes from.
+   * 
+ * + * .google.bigtable.v2.StreamPartition partition = 3; + * + * @return Whether the partition field is set. + */ + boolean hasPartition(); + /** + * + * + *
+   * The partition to read changes from.
+   * 
+ * + * .google.bigtable.v2.StreamPartition partition = 3; + * + * @return The partition. + */ + StreamPartition getPartition(); + /** + * + * + *
+   * The partition to read changes from.
+   * 
+ * + * .google.bigtable.v2.StreamPartition partition = 3; + */ + StreamPartitionOrBuilder getPartitionOrBuilder(); + + /** + * + * + *
+   * Start reading the stream at the specified timestamp. This timestamp must
+   * be within the change stream retention period, less than or equal to the
+   * current time, and after change stream creation, whichever is greater.
+   * This value is inclusive and will be truncated to microsecond granularity.
+   * 
+ * + * .google.protobuf.Timestamp start_time = 4; + * + * @return Whether the startTime field is set. + */ + boolean hasStartTime(); + /** + * + * + *
+   * Start reading the stream at the specified timestamp. This timestamp must
+   * be within the change stream retention period, less than or equal to the
+   * current time, and after change stream creation, whichever is greater.
+   * This value is inclusive and will be truncated to microsecond granularity.
+   * 
+ * + * .google.protobuf.Timestamp start_time = 4; + * + * @return The startTime. + */ + com.google.protobuf.Timestamp getStartTime(); + /** + * + * + *
+   * Start reading the stream at the specified timestamp. This timestamp must
+   * be within the change stream retention period, less than or equal to the
+   * current time, and after change stream creation, whichever is greater.
+   * This value is inclusive and will be truncated to microsecond granularity.
+   * 
+ * + * .google.protobuf.Timestamp start_time = 4; + */ + com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder(); + + /** + * + * + *
+   * Tokens that describe how to resume reading a stream where reading
+   * previously left off. If specified, changes will be read starting at the
+   * position. Tokens are delivered on the stream as part of `Heartbeat`
+   * and `CloseStream` messages.
+   * If a single token is provided, the token’s partition must exactly match
+   * the request’s partition. If multiple tokens are provided, as in the case
+   * of a partition merge, the union of the token partitions must exactly
+   * cover the request’s partition. Otherwise, INVALID_ARGUMENT will be
+   * returned.
+   * 
+ * + * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; + * + * @return Whether the continuationTokens field is set. + */ + boolean hasContinuationTokens(); + /** + * + * + *
+   * Tokens that describe how to resume reading a stream where reading
+   * previously left off. If specified, changes will be read starting at the
+   * position. Tokens are delivered on the stream as part of `Heartbeat`
+   * and `CloseStream` messages.
+   * If a single token is provided, the token’s partition must exactly match
+   * the request’s partition. If multiple tokens are provided, as in the case
+   * of a partition merge, the union of the token partitions must exactly
+   * cover the request’s partition. Otherwise, INVALID_ARGUMENT will be
+   * returned.
+   * 
+ * + * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; + * + * @return The continuationTokens. + */ + StreamContinuationTokens getContinuationTokens(); + /** + * + * + *
+   * Tokens that describe how to resume reading a stream where reading
+   * previously left off. If specified, changes will be read starting at the
+   * position. Tokens are delivered on the stream as part of `Heartbeat`
+   * and `CloseStream` messages.
+   * If a single token is provided, the token’s partition must exactly match
+   * the request’s partition. If multiple tokens are provided, as in the case
+   * of a partition merge, the union of the token partitions must exactly
+   * cover the request’s partition. Otherwise, INVALID_ARGUMENT will be
+   * returned.
+   * 
+ * + * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; + */ + StreamContinuationTokensOrBuilder getContinuationTokensOrBuilder(); + + /** + * + * + *
+   * If specified, OK will be returned when the stream advances beyond
+   * this time. Otherwise, changes will be continuously delivered on the stream.
+   * This value is inclusive and will be truncated to microsecond granularity.
+   * 
+ * + * .google.protobuf.Timestamp end_time = 5; + * + * @return Whether the endTime field is set. + */ + boolean hasEndTime(); + /** + * + * + *
+   * If specified, OK will be returned when the stream advances beyond
+   * this time. Otherwise, changes will be continuously delivered on the stream.
+   * This value is inclusive and will be truncated to microsecond granularity.
+   * 
+ * + * .google.protobuf.Timestamp end_time = 5; + * + * @return The endTime. + */ + com.google.protobuf.Timestamp getEndTime(); + /** + * + * + *
+   * If specified, OK will be returned when the stream advances beyond
+   * this time. Otherwise, changes will be continuously delivered on the stream.
+   * This value is inclusive and will be truncated to microsecond granularity.
+   * 
+ * + * .google.protobuf.Timestamp end_time = 5; + */ + com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder(); + + /** + * + * + *
+   * If specified, the duration between `Heartbeat` messages on the stream.
+   * Otherwise, defaults to 5 seconds.
+   * 
+ * + * .google.protobuf.Duration heartbeat_duration = 7; + * + * @return Whether the heartbeatDuration field is set. + */ + boolean hasHeartbeatDuration(); + /** + * + * + *
+   * If specified, the duration between `Heartbeat` messages on the stream.
+   * Otherwise, defaults to 5 seconds.
+   * 
+ * + * .google.protobuf.Duration heartbeat_duration = 7; + * + * @return The heartbeatDuration. + */ + com.google.protobuf.Duration getHeartbeatDuration(); + /** + * + * + *
+   * If specified, the duration between `Heartbeat` messages on the stream.
+   * Otherwise, defaults to 5 seconds.
+   * 
+ * + * .google.protobuf.Duration heartbeat_duration = 7; + */ + com.google.protobuf.DurationOrBuilder getHeartbeatDurationOrBuilder(); + + public ReadChangeStreamRequest.StartFromCase getStartFromCase(); +} diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamResponse.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamResponse.java new file mode 100644 index 0000000000..757cebfca6 --- /dev/null +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamResponse.java @@ -0,0 +1,8471 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/bigtable/v2/bigtable.proto + +package com.google.bigtable.v2; + +/** + * + * + *
+ * NOTE: This API is not generally available. Users must be allowlisted.
+ * Response message for Bigtable.ReadChangeStream.
+ * 
+ * + * Protobuf type {@code google.bigtable.v2.ReadChangeStreamResponse} + */ +public final class ReadChangeStreamResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.bigtable.v2.ReadChangeStreamResponse) + ReadChangeStreamResponseOrBuilder { + private static final long serialVersionUID = 0L; + // Use ReadChangeStreamResponse.newBuilder() to construct. + private ReadChangeStreamResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReadChangeStreamResponse() {} + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new ReadChangeStreamResponse(); + } + + @Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ReadChangeStreamResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + DataChange.Builder subBuilder = null; + if (streamRecordCase_ == 1) { + subBuilder = ((DataChange) streamRecord_).toBuilder(); + } + streamRecord_ = input.readMessage(DataChange.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((DataChange) streamRecord_); + streamRecord_ = subBuilder.buildPartial(); + } + streamRecordCase_ = 1; + break; + } + case 18: + { + Heartbeat.Builder subBuilder = null; + if (streamRecordCase_ == 2) { + subBuilder = ((Heartbeat) streamRecord_).toBuilder(); + } + streamRecord_ = input.readMessage(Heartbeat.parser(), 
extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((Heartbeat) streamRecord_); + streamRecord_ = subBuilder.buildPartial(); + } + streamRecordCase_ = 2; + break; + } + case 26: + { + CloseStream.Builder subBuilder = null; + if (streamRecordCase_ == 3) { + subBuilder = ((CloseStream) streamRecord_).toBuilder(); + } + streamRecord_ = input.readMessage(CloseStream.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((CloseStream) streamRecord_); + streamRecord_ = subBuilder.buildPartial(); + } + streamRecordCase_ = 3; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return BigtableProto.internal_static_google_bigtable_v2_ReadChangeStreamResponse_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized(ReadChangeStreamResponse.class, Builder.class); + } + + public interface MutationChunkOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.bigtable.v2.ReadChangeStreamResponse.MutationChunk) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * If set, then the mutation is a `SetCell` with a chunked value across
+     * multiple messages.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunk_info = 1; + * + * + * @return Whether the chunkInfo field is set. + */ + boolean hasChunkInfo(); + /** + * + * + *
+     * If set, then the mutation is a `SetCell` with a chunked value across
+     * multiple messages.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunk_info = 1; + * + * + * @return The chunkInfo. + */ + MutationChunk.ChunkInfo getChunkInfo(); + /** + * + * + *
+     * If set, then the mutation is a `SetCell` with a chunked value across
+     * multiple messages.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunk_info = 1; + * + */ + MutationChunk.ChunkInfoOrBuilder getChunkInfoOrBuilder(); + + /** + * + * + *
+     * If this is a continuation of a chunked message (`chunked_value_offset` >
+     * 0), ignore all fields except the `SetCell`'s value and merge it with
+     * the previous message by concatenating the value fields.
+     * 
+ * + * .google.bigtable.v2.Mutation mutation = 2; + * + * @return Whether the mutation field is set. + */ + boolean hasMutation(); + /** + * + * + *
+     * If this is a continuation of a chunked message (`chunked_value_offset` >
+     * 0), ignore all fields except the `SetCell`'s value and merge it with
+     * the previous message by concatenating the value fields.
+     * 
+ * + * .google.bigtable.v2.Mutation mutation = 2; + * + * @return The mutation. + */ + Mutation getMutation(); + /** + * + * + *
+     * If this is a continuation of a chunked message (`chunked_value_offset` >
+     * 0), ignore all fields except the `SetCell`'s value and merge it with
+     * the previous message by concatenating the value fields.
+     * 
+ * + * .google.bigtable.v2.Mutation mutation = 2; + */ + MutationOrBuilder getMutationOrBuilder(); + } + /** + * + * + *
+   * A partial or complete mutation.
+   * 
+ * + * Protobuf type {@code google.bigtable.v2.ReadChangeStreamResponse.MutationChunk} + */ + public static final class MutationChunk extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.bigtable.v2.ReadChangeStreamResponse.MutationChunk) + MutationChunkOrBuilder { + private static final long serialVersionUID = 0L; + // Use MutationChunk.newBuilder() to construct. + private MutationChunk(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private MutationChunk() {} + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new MutationChunk(); + } + + @Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private MutationChunk( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + ChunkInfo.Builder subBuilder = null; + if (chunkInfo_ != null) { + subBuilder = chunkInfo_.toBuilder(); + } + chunkInfo_ = input.readMessage(ChunkInfo.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(chunkInfo_); + chunkInfo_ = subBuilder.buildPartial(); + } + + break; + } + case 18: + { + Mutation.Builder subBuilder = null; + if (mutation_ != null) { + subBuilder = mutation_.toBuilder(); + } + mutation_ = input.readMessage(Mutation.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(mutation_); + mutation_ = subBuilder.buildPartial(); + } + + break; + } + 
default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_fieldAccessorTable + .ensureFieldAccessorsInitialized(MutationChunk.class, Builder.class); + } + + public interface ChunkInfoOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+       * The total value size of all the chunks that make up the `SetCell`.
+       * 
+ * + * int32 chunked_value_size = 1; + * + * @return The chunkedValueSize. + */ + int getChunkedValueSize(); + + /** + * + * + *
+       * The byte offset of this chunk into the total value size of the
+       * mutation.
+       * 
+ * + * int32 chunked_value_offset = 2; + * + * @return The chunkedValueOffset. + */ + int getChunkedValueOffset(); + + /** + * + * + *
+       * When true, this is the last chunk of a chunked `SetCell`.
+       * 
+ * + * bool last_chunk = 3; + * + * @return The lastChunk. + */ + boolean getLastChunk(); + } + /** + * + * + *
+     * Information about the chunking of this mutation.
+     * Only `SetCell` mutations can be chunked, and all chunks for a `SetCell`
+     * will be delivered contiguously with no other mutation types interleaved.
+     * 
+ * + * Protobuf type {@code google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo} + */ + public static final class ChunkInfo extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo) + ChunkInfoOrBuilder { + private static final long serialVersionUID = 0L; + // Use ChunkInfo.newBuilder() to construct. + private ChunkInfo(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ChunkInfo() {} + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new ChunkInfo(); + } + + @Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ChunkInfo( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + chunkedValueSize_ = input.readInt32(); + break; + } + case 16: + { + chunkedValueOffset_ = input.readInt32(); + break; + } + case 24: + { + lastChunk_ = input.readBool(); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_ChunkInfo_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_ChunkInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized(ChunkInfo.class, Builder.class); + } + + public static final int CHUNKED_VALUE_SIZE_FIELD_NUMBER = 1; + private int chunkedValueSize_; + /** + * + * + *
+       * The total value size of all the chunks that make up the `SetCell`.
+       * 
+ * + * int32 chunked_value_size = 1; + * + * @return The chunkedValueSize. + */ + @Override + public int getChunkedValueSize() { + return chunkedValueSize_; + } + + public static final int CHUNKED_VALUE_OFFSET_FIELD_NUMBER = 2; + private int chunkedValueOffset_; + /** + * + * + *
+       * The byte offset of this chunk into the total value size of the
+       * mutation.
+       * 
+ * + * int32 chunked_value_offset = 2; + * + * @return The chunkedValueOffset. + */ + @Override + public int getChunkedValueOffset() { + return chunkedValueOffset_; + } + + public static final int LAST_CHUNK_FIELD_NUMBER = 3; + private boolean lastChunk_; + /** + * + * + *
+       * When true, this is the last chunk of a chunked `SetCell`.
+       * 
+ * + * bool last_chunk = 3; + * + * @return The lastChunk. + */ + @Override + public boolean getLastChunk() { + return lastChunk_; + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (chunkedValueSize_ != 0) { + output.writeInt32(1, chunkedValueSize_); + } + if (chunkedValueOffset_ != 0) { + output.writeInt32(2, chunkedValueOffset_); + } + if (lastChunk_ != false) { + output.writeBool(3, lastChunk_); + } + unknownFields.writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (chunkedValueSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(1, chunkedValueSize_); + } + if (chunkedValueOffset_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, chunkedValueOffset_); + } + if (lastChunk_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(3, lastChunk_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof ChunkInfo)) { + return super.equals(obj); + } + ChunkInfo other = (ChunkInfo) obj; + + if (getChunkedValueSize() != other.getChunkedValueSize()) return false; + if (getChunkedValueOffset() != other.getChunkedValueOffset()) return false; + if (getLastChunk() != other.getLastChunk()) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * 
hash) + getDescriptor().hashCode(); + hash = (37 * hash) + CHUNKED_VALUE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getChunkedValueSize(); + hash = (37 * hash) + CHUNKED_VALUE_OFFSET_FIELD_NUMBER; + hash = (53 * hash) + getChunkedValueOffset(); + hash = (37 * hash) + LAST_CHUNK_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getLastChunk()); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static ChunkInfo parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static ChunkInfo parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static ChunkInfo parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static ChunkInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static ChunkInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static ChunkInfo parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static ChunkInfo parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static ChunkInfo parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static ChunkInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static ChunkInfo parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static ChunkInfo parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static ChunkInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(ChunkInfo prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+       * Information about the chunking of this mutation.
+       * Only `SetCell` mutations can be chunked, and all chunks for a `SetCell`
+       * will be delivered contiguously with no other mutation types interleaved.
+       * 
+ * + * Protobuf type {@code google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo) + ChunkInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_ChunkInfo_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_ChunkInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized(ChunkInfo.class, Builder.class); + } + + // Construct using + // com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @Override + public Builder clear() { + super.clear(); + chunkedValueSize_ = 0; + + chunkedValueOffset_ = 0; + + lastChunk_ = false; + + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_ChunkInfo_descriptor; + } + + @Override + public ChunkInfo getDefaultInstanceForType() { + return ChunkInfo.getDefaultInstance(); + } + + @Override + public ChunkInfo build() { + ChunkInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public ChunkInfo buildPartial() { + ChunkInfo result = new 
ChunkInfo(this); + result.chunkedValueSize_ = chunkedValueSize_; + result.chunkedValueOffset_ = chunkedValueOffset_; + result.lastChunk_ = lastChunk_; + onBuilt(); + return result; + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof ChunkInfo) { + return mergeFrom((ChunkInfo) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(ChunkInfo other) { + if (other == ChunkInfo.getDefaultInstance()) return this; + if (other.getChunkedValueSize() != 0) { + setChunkedValueSize(other.getChunkedValueSize()); + } + if (other.getChunkedValueOffset() != 0) { + setChunkedValueOffset(other.getChunkedValueOffset()); + } + if (other.getLastChunk() != false) { + setLastChunk(other.getLastChunk()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + 
ChunkInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (ChunkInfo) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int chunkedValueSize_; + /** + * + * + *
+         * The total value size of all the chunks that make up the `SetCell`.
+         * 
+ * + * int32 chunked_value_size = 1; + * + * @return The chunkedValueSize. + */ + @Override + public int getChunkedValueSize() { + return chunkedValueSize_; + } + /** + * + * + *
+         * The total value size of all the chunks that make up the `SetCell`.
+         * 
+ * + * int32 chunked_value_size = 1; + * + * @param value The chunkedValueSize to set. + * @return This builder for chaining. + */ + public Builder setChunkedValueSize(int value) { + + chunkedValueSize_ = value; + onChanged(); + return this; + } + /** + * + * + *
+         * The total value size of all the chunks that make up the `SetCell`.
+         * 
+ * + * int32 chunked_value_size = 1; + * + * @return This builder for chaining. + */ + public Builder clearChunkedValueSize() { + + chunkedValueSize_ = 0; + onChanged(); + return this; + } + + private int chunkedValueOffset_; + /** + * + * + *
+         * The byte offset of this chunk into the total value size of the
+         * mutation.
+         * 
+ * + * int32 chunked_value_offset = 2; + * + * @return The chunkedValueOffset. + */ + @Override + public int getChunkedValueOffset() { + return chunkedValueOffset_; + } + /** + * + * + *
+         * The byte offset of this chunk into the total value size of the
+         * mutation.
+         * 
+ * + * int32 chunked_value_offset = 2; + * + * @param value The chunkedValueOffset to set. + * @return This builder for chaining. + */ + public Builder setChunkedValueOffset(int value) { + + chunkedValueOffset_ = value; + onChanged(); + return this; + } + /** + * + * + *
+         * The byte offset of this chunk into the total value size of the
+         * mutation.
+         * 
+ * + * int32 chunked_value_offset = 2; + * + * @return This builder for chaining. + */ + public Builder clearChunkedValueOffset() { + + chunkedValueOffset_ = 0; + onChanged(); + return this; + } + + private boolean lastChunk_; + /** + * + * + *
+         * When true, this is the last chunk of a chunked `SetCell`.
+         * 
+ * + * bool last_chunk = 3; + * + * @return The lastChunk. + */ + @Override + public boolean getLastChunk() { + return lastChunk_; + } + /** + * + * + *
+         * When true, this is the last chunk of a chunked `SetCell`.
+         * 
+ * + * bool last_chunk = 3; + * + * @param value The lastChunk to set. + * @return This builder for chaining. + */ + public Builder setLastChunk(boolean value) { + + lastChunk_ = value; + onChanged(); + return this; + } + /** + * + * + *
+         * When true, this is the last chunk of a chunked `SetCell`.
+         * 
+ * + * bool last_chunk = 3; + * + * @return This builder for chaining. + */ + public Builder clearLastChunk() { + + lastChunk_ = false; + onChanged(); + return this; + } + + @Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo) + } + + // @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo) + private static final ChunkInfo DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new ChunkInfo(); + } + + public static ChunkInfo getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public ChunkInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ChunkInfo(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public ChunkInfo getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public static final int CHUNK_INFO_FIELD_NUMBER = 1; + private ChunkInfo chunkInfo_; + /** + * + * + *
+     * If set, then the mutation is a `SetCell` with a chunked value across
+     * multiple messages.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunk_info = 1; + * + * + * @return Whether the chunkInfo field is set. + */ + @Override + public boolean hasChunkInfo() { + return chunkInfo_ != null; + } + /** + * + * + *
+     * If set, then the mutation is a `SetCell` with a chunked value across
+     * multiple messages.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunk_info = 1; + * + * + * @return The chunkInfo. + */ + @Override + public ChunkInfo getChunkInfo() { + return chunkInfo_ == null ? ChunkInfo.getDefaultInstance() : chunkInfo_; + } + /** + * + * + *
+     * If set, then the mutation is a `SetCell` with a chunked value across
+     * multiple messages.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunk_info = 1; + * + */ + @Override + public ChunkInfoOrBuilder getChunkInfoOrBuilder() { + return getChunkInfo(); + } + + public static final int MUTATION_FIELD_NUMBER = 2; + private Mutation mutation_; + /** + * + * + *
+     * If this is a continuation of a chunked message (`chunked_value_offset` >
+     * 0), ignore all fields except the `SetCell`'s value and merge it with
+     * the previous message by concatenating the value fields.
+     * 
+ * + * .google.bigtable.v2.Mutation mutation = 2; + * + * @return Whether the mutation field is set. + */ + @Override + public boolean hasMutation() { + return mutation_ != null; + } + /** + * + * + *
+     * If this is a continuation of a chunked message (`chunked_value_offset` >
+     * 0), ignore all fields except the `SetCell`'s value and merge it with
+     * the previous message by concatenating the value fields.
+     * 
+ * + * .google.bigtable.v2.Mutation mutation = 2; + * + * @return The mutation. + */ + @Override + public Mutation getMutation() { + return mutation_ == null ? Mutation.getDefaultInstance() : mutation_; + } + /** + * + * + *
+     * If this is a continuation of a chunked message (`chunked_value_offset` >
+     * 0), ignore all fields except the `SetCell`'s value and merge it with
+     * the previous message by concatenating the value fields.
+     * 
+ * + * .google.bigtable.v2.Mutation mutation = 2; + */ + @Override + public MutationOrBuilder getMutationOrBuilder() { + return getMutation(); + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (chunkInfo_ != null) { + output.writeMessage(1, getChunkInfo()); + } + if (mutation_ != null) { + output.writeMessage(2, getMutation()); + } + unknownFields.writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (chunkInfo_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getChunkInfo()); + } + if (mutation_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getMutation()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof MutationChunk)) { + return super.equals(obj); + } + MutationChunk other = (MutationChunk) obj; + + if (hasChunkInfo() != other.hasChunkInfo()) return false; + if (hasChunkInfo()) { + if (!getChunkInfo().equals(other.getChunkInfo())) return false; + } + if (hasMutation() != other.hasMutation()) return false; + if (hasMutation()) { + if (!getMutation().equals(other.getMutation())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasChunkInfo()) { + hash = (37 * hash) + 
CHUNK_INFO_FIELD_NUMBER; + hash = (53 * hash) + getChunkInfo().hashCode(); + } + if (hasMutation()) { + hash = (37 * hash) + MUTATION_FIELD_NUMBER; + hash = (53 * hash) + getMutation().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static MutationChunk parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static MutationChunk parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static MutationChunk parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static MutationChunk parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static MutationChunk parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static MutationChunk parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static MutationChunk parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static MutationChunk parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, 
extensionRegistry); + } + + public static MutationChunk parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static MutationChunk parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static MutationChunk parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static MutationChunk parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(MutationChunk prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * A partial or complete mutation.
+     * 
+ * + * Protobuf type {@code google.bigtable.v2.ReadChangeStreamResponse.MutationChunk} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.bigtable.v2.ReadChangeStreamResponse.MutationChunk) + MutationChunkOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_fieldAccessorTable + .ensureFieldAccessorsInitialized(MutationChunk.class, Builder.class); + } + + // Construct using com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @Override + public Builder clear() { + super.clear(); + if (chunkInfoBuilder_ == null) { + chunkInfo_ = null; + } else { + chunkInfo_ = null; + chunkInfoBuilder_ = null; + } + if (mutationBuilder_ == null) { + mutation_ = null; + } else { + mutation_ = null; + mutationBuilder_ = null; + } + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_descriptor; + } + + @Override + public MutationChunk getDefaultInstanceForType() { + return MutationChunk.getDefaultInstance(); + } + + @Override + public MutationChunk build() { + MutationChunk result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + @Override + public MutationChunk buildPartial() { + MutationChunk result = new MutationChunk(this); + if (chunkInfoBuilder_ == null) { + result.chunkInfo_ = chunkInfo_; + } else { + result.chunkInfo_ = chunkInfoBuilder_.build(); + } + if (mutationBuilder_ == null) { + result.mutation_ = mutation_; + } else { + result.mutation_ = mutationBuilder_.build(); + } + onBuilt(); + return result; + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof MutationChunk) { + return mergeFrom((MutationChunk) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(MutationChunk other) { + if (other == MutationChunk.getDefaultInstance()) return this; + if (other.hasChunkInfo()) { + mergeChunkInfo(other.getChunkInfo()); + } + if (other.hasMutation()) { + mergeMutation(other.getMutation()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder 
mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + MutationChunk parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (MutationChunk) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private ChunkInfo chunkInfo_; + private com.google.protobuf.SingleFieldBuilderV3< + ChunkInfo, ChunkInfo.Builder, ChunkInfoOrBuilder> + chunkInfoBuilder_; + /** + * + * + *
+       * If set, then the mutation is a `SetCell` with a chunked value across
+       * multiple messages.
+       * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunk_info = 1; + * + * + * @return Whether the chunkInfo field is set. + */ + public boolean hasChunkInfo() { + return chunkInfoBuilder_ != null || chunkInfo_ != null; + } + /** + * + * + *
+       * If set, then the mutation is a `SetCell` with a chunked value across
+       * multiple messages.
+       * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunk_info = 1; + * + * + * @return The chunkInfo. + */ + public ChunkInfo getChunkInfo() { + if (chunkInfoBuilder_ == null) { + return chunkInfo_ == null ? ChunkInfo.getDefaultInstance() : chunkInfo_; + } else { + return chunkInfoBuilder_.getMessage(); + } + } + /** + * + * + *
+       * If set, then the mutation is a `SetCell` with a chunked value across
+       * multiple messages.
+       * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunk_info = 1; + * + */ + public Builder setChunkInfo(ChunkInfo value) { + if (chunkInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + chunkInfo_ = value; + onChanged(); + } else { + chunkInfoBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+       * If set, then the mutation is a `SetCell` with a chunked value across
+       * multiple messages.
+       * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunk_info = 1; + * + */ + public Builder setChunkInfo(ChunkInfo.Builder builderForValue) { + if (chunkInfoBuilder_ == null) { + chunkInfo_ = builderForValue.build(); + onChanged(); + } else { + chunkInfoBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+       * If set, then the mutation is a `SetCell` with a chunked value across
+       * multiple messages.
+       * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunk_info = 1; + * + */ + public Builder mergeChunkInfo(ChunkInfo value) { + if (chunkInfoBuilder_ == null) { + if (chunkInfo_ != null) { + chunkInfo_ = ChunkInfo.newBuilder(chunkInfo_).mergeFrom(value).buildPartial(); + } else { + chunkInfo_ = value; + } + onChanged(); + } else { + chunkInfoBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+       * If set, then the mutation is a `SetCell` with a chunked value across
+       * multiple messages.
+       * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunk_info = 1; + * + */ + public Builder clearChunkInfo() { + if (chunkInfoBuilder_ == null) { + chunkInfo_ = null; + onChanged(); + } else { + chunkInfo_ = null; + chunkInfoBuilder_ = null; + } + + return this; + } + /** + * + * + *
+       * If set, then the mutation is a `SetCell` with a chunked value across
+       * multiple messages.
+       * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunk_info = 1; + * + */ + public ChunkInfo.Builder getChunkInfoBuilder() { + + onChanged(); + return getChunkInfoFieldBuilder().getBuilder(); + } + /** + * + * + *
+       * If set, then the mutation is a `SetCell` with a chunked value across
+       * multiple messages.
+       * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunk_info = 1; + * + */ + public ChunkInfoOrBuilder getChunkInfoOrBuilder() { + if (chunkInfoBuilder_ != null) { + return chunkInfoBuilder_.getMessageOrBuilder(); + } else { + return chunkInfo_ == null ? ChunkInfo.getDefaultInstance() : chunkInfo_; + } + } + /** + * + * + *
+       * If set, then the mutation is a `SetCell` with a chunked value across
+       * multiple messages.
+       * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunk_info = 1; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + ChunkInfo, ChunkInfo.Builder, ChunkInfoOrBuilder> + getChunkInfoFieldBuilder() { + if (chunkInfoBuilder_ == null) { + chunkInfoBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + ChunkInfo, ChunkInfo.Builder, ChunkInfoOrBuilder>( + getChunkInfo(), getParentForChildren(), isClean()); + chunkInfo_ = null; + } + return chunkInfoBuilder_; + } + + private Mutation mutation_; + private com.google.protobuf.SingleFieldBuilderV3< + Mutation, Mutation.Builder, MutationOrBuilder> + mutationBuilder_; + /** + * + * + *
+       * If this is a continuation of a chunked message (`chunked_value_offset` >
+       * 0), ignore all fields except the `SetCell`'s value and merge it with
+       * the previous message by concatenating the value fields.
+       * 
+ * + * .google.bigtable.v2.Mutation mutation = 2; + * + * @return Whether the mutation field is set. + */ + public boolean hasMutation() { + return mutationBuilder_ != null || mutation_ != null; + } + /** + * + * + *
+       * If this is a continuation of a chunked message (`chunked_value_offset` >
+       * 0), ignore all fields except the `SetCell`'s value and merge it with
+       * the previous message by concatenating the value fields.
+       * 
+ * + * .google.bigtable.v2.Mutation mutation = 2; + * + * @return The mutation. + */ + public Mutation getMutation() { + if (mutationBuilder_ == null) { + return mutation_ == null ? Mutation.getDefaultInstance() : mutation_; + } else { + return mutationBuilder_.getMessage(); + } + } + /** + * + * + *
+       * If this is a continuation of a chunked message (`chunked_value_offset` >
+       * 0), ignore all fields except the `SetCell`'s value and merge it with
+       * the previous message by concatenating the value fields.
+       * 
+ * + * .google.bigtable.v2.Mutation mutation = 2; + */ + public Builder setMutation(Mutation value) { + if (mutationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + mutation_ = value; + onChanged(); + } else { + mutationBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+       * If this is a continuation of a chunked message (`chunked_value_offset` >
+       * 0), ignore all fields except the `SetCell`'s value and merge it with
+       * the previous message by concatenating the value fields.
+       * 
+ * + * .google.bigtable.v2.Mutation mutation = 2; + */ + public Builder setMutation(Mutation.Builder builderForValue) { + if (mutationBuilder_ == null) { + mutation_ = builderForValue.build(); + onChanged(); + } else { + mutationBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+       * If this is a continuation of a chunked message (`chunked_value_offset` >
+       * 0), ignore all fields except the `SetCell`'s value and merge it with
+       * the previous message by concatenating the value fields.
+       * 
+ * + * .google.bigtable.v2.Mutation mutation = 2; + */ + public Builder mergeMutation(Mutation value) { + if (mutationBuilder_ == null) { + if (mutation_ != null) { + mutation_ = Mutation.newBuilder(mutation_).mergeFrom(value).buildPartial(); + } else { + mutation_ = value; + } + onChanged(); + } else { + mutationBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+       * If this is a continuation of a chunked message (`chunked_value_offset` >
+       * 0), ignore all fields except the `SetCell`'s value and merge it with
+       * the previous message by concatenating the value fields.
+       * 
+ * + * .google.bigtable.v2.Mutation mutation = 2; + */ + public Builder clearMutation() { + if (mutationBuilder_ == null) { + mutation_ = null; + onChanged(); + } else { + mutation_ = null; + mutationBuilder_ = null; + } + + return this; + } + /** + * + * + *
+       * If this is a continuation of a chunked message (`chunked_value_offset` >
+       * 0), ignore all fields except the `SetCell`'s value and merge it with
+       * the previous message by concatenating the value fields.
+       * 
+ * + * .google.bigtable.v2.Mutation mutation = 2; + */ + public Mutation.Builder getMutationBuilder() { + + onChanged(); + return getMutationFieldBuilder().getBuilder(); + } + /** + * + * + *
+       * If this is a continuation of a chunked message (`chunked_value_offset` >
+       * 0), ignore all fields except the `SetCell`'s value and merge it with
+       * the previous message by concatenating the value fields.
+       * 
+ * + * .google.bigtable.v2.Mutation mutation = 2; + */ + public MutationOrBuilder getMutationOrBuilder() { + if (mutationBuilder_ != null) { + return mutationBuilder_.getMessageOrBuilder(); + } else { + return mutation_ == null ? Mutation.getDefaultInstance() : mutation_; + } + } + /** + * + * + *
+       * If this is a continuation of a chunked message (`chunked_value_offset` >
+       * 0), ignore all fields except the `SetCell`'s value and merge it with
+       * the previous message by concatenating the value fields.
+       * 
+ * + * .google.bigtable.v2.Mutation mutation = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + Mutation, Mutation.Builder, MutationOrBuilder> + getMutationFieldBuilder() { + if (mutationBuilder_ == null) { + mutationBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + Mutation, Mutation.Builder, MutationOrBuilder>( + getMutation(), getParentForChildren(), isClean()); + mutation_ = null; + } + return mutationBuilder_; + } + + @Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.bigtable.v2.ReadChangeStreamResponse.MutationChunk) + } + + // @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadChangeStreamResponse.MutationChunk) + private static final MutationChunk DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new MutationChunk(); + } + + public static MutationChunk getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public MutationChunk parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MutationChunk(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public MutationChunk getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface DataChangeOrBuilder + extends + // 
@@protoc_insertion_point(interface_extends:google.bigtable.v2.ReadChangeStreamResponse.DataChange) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * The type of the mutation.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type type = 1; + * + * @return The enum numeric value on the wire for type. + */ + int getTypeValue(); + /** + * + * + *
+     * The type of the mutation.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type type = 1; + * + * @return The type. + */ + DataChange.Type getType(); + + /** + * + * + *
+     * The cluster where the mutation was applied.
+     * Not set when `type` is `GARBAGE_COLLECTION`.
+     * 
+ * + * string source_cluster_id = 2; + * + * @return The sourceClusterId. + */ + String getSourceClusterId(); + /** + * + * + *
+     * The cluster where the mutation was applied.
+     * Not set when `type` is `GARBAGE_COLLECTION`.
+     * 
+ * + * string source_cluster_id = 2; + * + * @return The bytes for sourceClusterId. + */ + com.google.protobuf.ByteString getSourceClusterIdBytes(); + + /** + * + * + *
+     * The row key for all mutations that are part of this `DataChange`.
+     * If the `DataChange` is chunked across multiple messages, then this field
+     * will only be set for the first message.
+     * 
+ * + * bytes row_key = 3; + * + * @return The rowKey. + */ + com.google.protobuf.ByteString getRowKey(); + + /** + * + * + *
+     * The timestamp at which the mutation was applied on the Bigtable server.
+     * 
+ * + * .google.protobuf.Timestamp commit_timestamp = 4; + * + * @return Whether the commitTimestamp field is set. + */ + boolean hasCommitTimestamp(); + /** + * + * + *
+     * The timestamp at which the mutation was applied on the Bigtable server.
+     * 
+ * + * .google.protobuf.Timestamp commit_timestamp = 4; + * + * @return The commitTimestamp. + */ + com.google.protobuf.Timestamp getCommitTimestamp(); + /** + * + * + *
+     * The timestamp at which the mutation was applied on the Bigtable server.
+     * 
+ * + * .google.protobuf.Timestamp commit_timestamp = 4; + */ + com.google.protobuf.TimestampOrBuilder getCommitTimestampOrBuilder(); + + /** + * + * + *
+     * A value that lets stream consumers reconstruct Bigtable's
+     * conflict resolution semantics.
+     * https://cloud.google.com/bigtable/docs/writes#conflict-resolution
+     * In the event that the same row key, column family, column qualifier,
+     * timestamp are modified on different clusters at the same
+     * `commit_timestamp`, the mutation with the larger `tiebreaker` will be the
+     * one chosen for the eventually consistent state of the system.
+     * 
+ * + * int32 tiebreaker = 5; + * + * @return The tiebreaker. + */ + int getTiebreaker(); + + /** + * + * + *
+     * The mutations associated with this change to the partition.
+     * May contain complete mutations or chunks of a multi-message chunked
+     * `DataChange` record.
+     * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + */ + java.util.List getChunksList(); + /** + * + * + *
+     * The mutations associated with this change to the partition.
+     * May contain complete mutations or chunks of a multi-message chunked
+     * `DataChange` record.
+     * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + */ + MutationChunk getChunks(int index); + /** + * + * + *
+     * The mutations associated with this change to the partition.
+     * May contain complete mutations or chunks of a multi-message chunked
+     * `DataChange` record.
+     * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + */ + int getChunksCount(); + /** + * + * + *
+     * The mutations associated with this change to the partition.
+     * May contain complete mutations or chunks of a multi-message chunked
+     * `DataChange` record.
+     * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + */ + java.util.List getChunksOrBuilderList(); + /** + * + * + *
+     * The mutations associated with this change to the partition.
+     * May contain complete mutations or chunks of a multi-message chunked
+     * `DataChange` record.
+     * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + */ + MutationChunkOrBuilder getChunksOrBuilder(int index); + + /** + * + * + *
+     * When true, indicates that the entire `DataChange` has been read
+     * and the client can safely process the message.
+     * 
+ * + * bool done = 8; + * + * @return The done. + */ + boolean getDone(); + + /** + * + * + *
+     * An encoded position for this stream's partition to restart reading from.
+     * This token is for the StreamPartition from the request.
+     * 
+ * + * string token = 9; + * + * @return The token. + */ + String getToken(); + /** + * + * + *
+     * An encoded position for this stream's partition to restart reading from.
+     * This token is for the StreamPartition from the request.
+     * 
+ * + * string token = 9; + * + * @return The bytes for token. + */ + com.google.protobuf.ByteString getTokenBytes(); + + /** + * + * + *
+     * A commit timestamp that is lower than or equal to any timestamp for a
+     * record that will be delivered in the future on the stream. For an example
+     * usage see https://beam.apache.org/documentation/basics/#watermarks
+     * 
+ * + * .google.protobuf.Timestamp low_watermark = 10; + * + * @return Whether the lowWatermark field is set. + */ + boolean hasLowWatermark(); + /** + * + * + *
+     * A commit timestamp that is lower than or equal to any timestamp for a
+     * record that will be delivered in the future on the stream. For an example
+     * usage see https://beam.apache.org/documentation/basics/#watermarks
+     * 
+ * + * .google.protobuf.Timestamp low_watermark = 10; + * + * @return The lowWatermark. + */ + com.google.protobuf.Timestamp getLowWatermark(); + /** + * + * + *
+     * A commit timestamp that is lower than or equal to any timestamp for a
+     * record that will be delivered in the future on the stream. For an example
+     * usage see https://beam.apache.org/documentation/basics/#watermarks
+     * 
+ * + * .google.protobuf.Timestamp low_watermark = 10; + */ + com.google.protobuf.TimestampOrBuilder getLowWatermarkOrBuilder(); + } + /** + * + * + *
+   * A message corresponding to one or more mutations to the partition
+   * being streamed. A single logical `DataChange` message may also be split
+   * across a sequence of multiple individual messages. Messages other than
+   * the first in a sequence will only have the `type` and `chunks` fields
+   * populated, with the final message in the sequence also containing `done`
+   * set to true.
+   * 
+ * + * Protobuf type {@code google.bigtable.v2.ReadChangeStreamResponse.DataChange} + */ + public static final class DataChange extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.bigtable.v2.ReadChangeStreamResponse.DataChange) + DataChangeOrBuilder { + private static final long serialVersionUID = 0L; + // Use DataChange.newBuilder() to construct. + private DataChange(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private DataChange() { + type_ = 0; + sourceClusterId_ = ""; + rowKey_ = com.google.protobuf.ByteString.EMPTY; + chunks_ = java.util.Collections.emptyList(); + token_ = ""; + } + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new DataChange(); + } + + @Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private DataChange( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + int rawValue = input.readEnum(); + + type_ = rawValue; + break; + } + case 18: + { + String s = input.readStringRequireUtf8(); + + sourceClusterId_ = s; + break; + } + case 26: + { + rowKey_ = input.readBytes(); + break; + } + case 34: + { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (commitTimestamp_ != null) { + subBuilder = commitTimestamp_.toBuilder(); + } + commitTimestamp_ = + input.readMessage(com.google.protobuf.Timestamp.parser(), 
extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(commitTimestamp_); + commitTimestamp_ = subBuilder.buildPartial(); + } + + break; + } + case 40: + { + tiebreaker_ = input.readInt32(); + break; + } + case 50: + { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + chunks_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + chunks_.add(input.readMessage(MutationChunk.parser(), extensionRegistry)); + break; + } + case 64: + { + done_ = input.readBool(); + break; + } + case 74: + { + String s = input.readStringRequireUtf8(); + + token_ = s; + break; + } + case 82: + { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (lowWatermark_ != null) { + subBuilder = lowWatermark_.toBuilder(); + } + lowWatermark_ = + input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(lowWatermark_); + lowWatermark_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + chunks_ = java.util.Collections.unmodifiableList(chunks_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_DataChange_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return BigtableProto + 
.internal_static_google_bigtable_v2_ReadChangeStreamResponse_DataChange_fieldAccessorTable + .ensureFieldAccessorsInitialized(DataChange.class, Builder.class); + } + + /** + * + * + *
+     * The type of mutation.
+     * 
+ * + * Protobuf enum {@code google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type} + */ + public enum Type implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+       * The type is unspecified.
+       * 
+ * + * TYPE_UNSPECIFIED = 0; + */ + TYPE_UNSPECIFIED(0), + /** + * + * + *
+       * A user-initiated mutation.
+       * 
+ * + * USER = 1; + */ + USER(1), + /** + * + * + *
+       * A system-initiated mutation as part of garbage collection.
+       * https://cloud.google.com/bigtable/docs/garbage-collection
+       * 
+ * + * GARBAGE_COLLECTION = 2; + */ + GARBAGE_COLLECTION(2), + /** + * + * + *
+       * This is a continuation of a multi-message change.
+       * 
+ * + * CONTINUATION = 3; + */ + CONTINUATION(3), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+       * The type is unspecified.
+       * 
+ * + * TYPE_UNSPECIFIED = 0; + */ + public static final int TYPE_UNSPECIFIED_VALUE = 0; + /** + * + * + *
+       * A user-initiated mutation.
+       * 
+ * + * USER = 1; + */ + public static final int USER_VALUE = 1; + /** + * + * + *
+       * A system-initiated mutation as part of garbage collection.
+       * https://cloud.google.com/bigtable/docs/garbage-collection
+       * 
+ * + * GARBAGE_COLLECTION = 2; + */ + public static final int GARBAGE_COLLECTION_VALUE = 2; + /** + * + * + *
+       * This is a continuation of a multi-message change.
+       * 
+ * + * CONTINUATION = 3; + */ + public static final int CONTINUATION_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new IllegalArgumentException("Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @Deprecated + public static Type valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Type forNumber(int value) { + switch (value) { + case 0: + return TYPE_UNSPECIFIED; + case 1: + return USER; + case 2: + return GARBAGE_COLLECTION; + case 3: + return CONTINUATION; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Type findValueByNumber(int number) { + return Type.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return DataChange.getDescriptor().getEnumTypes().get(0); + } + + private static final Type[] VALUES = values(); + + public static Type valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != 
getDescriptor()) { + throw new IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Type(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type) + } + + public static final int TYPE_FIELD_NUMBER = 1; + private int type_; + /** + * + * + *
+     * The type of the mutation.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type type = 1; + * + * @return The enum numeric value on the wire for type. + */ + @Override + public int getTypeValue() { + return type_; + } + /** + * + * + *
+     * The type of the mutation.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type type = 1; + * + * @return The type. + */ + @Override + public Type getType() { + @SuppressWarnings("deprecation") + Type result = Type.valueOf(type_); + return result == null ? Type.UNRECOGNIZED : result; + } + + public static final int SOURCE_CLUSTER_ID_FIELD_NUMBER = 2; + private volatile Object sourceClusterId_; + /** + * + * + *
+     * The cluster where the mutation was applied.
+     * Not set when `type` is `GARBAGE_COLLECTION`.
+     * 
+ * + * string source_cluster_id = 2; + * + * @return The sourceClusterId. + */ + @Override + public String getSourceClusterId() { + Object ref = sourceClusterId_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + sourceClusterId_ = s; + return s; + } + } + /** + * + * + *
+     * The cluster where the mutation was applied.
+     * Not set when `type` is `GARBAGE_COLLECTION`.
+     * 
+ * + * string source_cluster_id = 2; + * + * @return The bytes for sourceClusterId. + */ + @Override + public com.google.protobuf.ByteString getSourceClusterIdBytes() { + Object ref = sourceClusterId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + sourceClusterId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ROW_KEY_FIELD_NUMBER = 3; + private com.google.protobuf.ByteString rowKey_; + /** + * + * + *
+     * The row key for all mutations that are part of this `DataChange`.
+     * If the `DataChange` is chunked across multiple messages, then this field
+     * will only be set for the first message.
+     * 
+ * + * bytes row_key = 3; + * + * @return The rowKey. + */ + @Override + public com.google.protobuf.ByteString getRowKey() { + return rowKey_; + } + + public static final int COMMIT_TIMESTAMP_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp commitTimestamp_; + /** + * + * + *
+     * The timestamp at which the mutation was applied on the Bigtable server.
+     * 
+ * + * .google.protobuf.Timestamp commit_timestamp = 4; + * + * @return Whether the commitTimestamp field is set. + */ + @Override + public boolean hasCommitTimestamp() { + return commitTimestamp_ != null; + } + /** + * + * + *
+     * The timestamp at which the mutation was applied on the Bigtable server.
+     * 
+ * + * .google.protobuf.Timestamp commit_timestamp = 4; + * + * @return The commitTimestamp. + */ + @Override + public com.google.protobuf.Timestamp getCommitTimestamp() { + return commitTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTimestamp_; + } + /** + * + * + *
+     * The timestamp at which the mutation was applied on the Bigtable server.
+     * 
+ * + * .google.protobuf.Timestamp commit_timestamp = 4; + */ + @Override + public com.google.protobuf.TimestampOrBuilder getCommitTimestampOrBuilder() { + return getCommitTimestamp(); + } + + public static final int TIEBREAKER_FIELD_NUMBER = 5; + private int tiebreaker_; + /** + * + * + *
+     * A value that lets stream consumers reconstruct Bigtable's
+     * conflict resolution semantics.
+     * https://cloud.google.com/bigtable/docs/writes#conflict-resolution
+     * In the event that the same row key, column family, column qualifier,
+     * timestamp are modified on different clusters at the same
+     * `commit_timestamp`, the mutation with the larger `tiebreaker` will be the
+     * one chosen for the eventually consistent state of the system.
+     * 
+ * + * int32 tiebreaker = 5; + * + * @return The tiebreaker. + */ + @Override + public int getTiebreaker() { + return tiebreaker_; + } + + public static final int CHUNKS_FIELD_NUMBER = 6; + private java.util.List chunks_; + /** + * + * + *
+     * The mutations associated with this change to the partition.
+     * May contain complete mutations or chunks of a multi-message chunked
+     * `DataChange` record.
+     * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + */ + @Override + public java.util.List getChunksList() { + return chunks_; + } + /** + * + * + *
+     * The mutations associated with this change to the partition.
+     * May contain complete mutations or chunks of a multi-message chunked
+     * `DataChange` record.
+     * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + */ + @Override + public java.util.List getChunksOrBuilderList() { + return chunks_; + } + /** + * + * + *
+     * The mutations associated with this change to the partition.
+     * May contain complete mutations or chunks of a multi-message chunked
+     * `DataChange` record.
+     * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + */ + @Override + public int getChunksCount() { + return chunks_.size(); + } + /** + * + * + *
+     * The mutations associated with this change to the partition.
+     * May contain complete mutations or chunks of a multi-message chunked
+     * `DataChange` record.
+     * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + */ + @Override + public MutationChunk getChunks(int index) { + return chunks_.get(index); + } + /** + * + * + *
+     * The mutations associated with this change to the partition.
+     * May contain complete mutations or chunks of a multi-message chunked
+     * `DataChange` record.
+     * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + */ + @Override + public MutationChunkOrBuilder getChunksOrBuilder(int index) { + return chunks_.get(index); + } + + public static final int DONE_FIELD_NUMBER = 8; + private boolean done_; + /** + * + * + *
+     * When true, indicates that the entire `DataChange` has been read
+     * and the client can safely process the message.
+     * 
+ * + * bool done = 8; + * + * @return The done. + */ + @Override + public boolean getDone() { + return done_; + } + + public static final int TOKEN_FIELD_NUMBER = 9; + private volatile Object token_; + /** + * + * + *
+     * An encoded position for this stream's partition to restart reading from.
+     * This token is for the StreamPartition from the request.
+     * 
+ * + * string token = 9; + * + * @return The token. + */ + @Override + public String getToken() { + Object ref = token_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + token_ = s; + return s; + } + } + /** + * + * + *
+     * An encoded position for this stream's partition to restart reading from.
+     * This token is for the StreamPartition from the request.
+     * 
+ * + * string token = 9; + * + * @return The bytes for token. + */ + @Override + public com.google.protobuf.ByteString getTokenBytes() { + Object ref = token_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + token_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int LOW_WATERMARK_FIELD_NUMBER = 10; + private com.google.protobuf.Timestamp lowWatermark_; + /** + * + * + *
+     * A commit timestamp that is lower than or equal to any timestamp for a
+     * record that will be delivered in the future on the stream. For an example
+     * usage see https://beam.apache.org/documentation/basics/#watermarks
+     * 
+ * + * .google.protobuf.Timestamp low_watermark = 10; + * + * @return Whether the lowWatermark field is set. + */ + @Override + public boolean hasLowWatermark() { + return lowWatermark_ != null; + } + /** + * + * + *
+     * A commit timestamp that is lower than or equal to any timestamp for a
+     * record that will be delivered in the future on the stream. For an example
+     * usage see https://beam.apache.org/documentation/basics/#watermarks
+     * 
+ * + * .google.protobuf.Timestamp low_watermark = 10; + * + * @return The lowWatermark. + */ + @Override + public com.google.protobuf.Timestamp getLowWatermark() { + return lowWatermark_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : lowWatermark_; + } + /** + * + * + *
+     * A commit timestamp that is lower than or equal to any timestamp for a
+     * record that will be delivered in the future on the stream. For an example
+     * usage see https://beam.apache.org/documentation/basics/#watermarks
+     * 
+ * + * .google.protobuf.Timestamp low_watermark = 10; + */ + @Override + public com.google.protobuf.TimestampOrBuilder getLowWatermarkOrBuilder() { + return getLowWatermark(); + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (type_ != Type.TYPE_UNSPECIFIED.getNumber()) { + output.writeEnum(1, type_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(sourceClusterId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, sourceClusterId_); + } + if (!rowKey_.isEmpty()) { + output.writeBytes(3, rowKey_); + } + if (commitTimestamp_ != null) { + output.writeMessage(4, getCommitTimestamp()); + } + if (tiebreaker_ != 0) { + output.writeInt32(5, tiebreaker_); + } + for (int i = 0; i < chunks_.size(); i++) { + output.writeMessage(6, chunks_.get(i)); + } + if (done_ != false) { + output.writeBool(8, done_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(token_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 9, token_); + } + if (lowWatermark_ != null) { + output.writeMessage(10, getLowWatermark()); + } + unknownFields.writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (type_ != Type.TYPE_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, type_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(sourceClusterId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, sourceClusterId_); + } + if (!rowKey_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(3, rowKey_); + } + if 
(commitTimestamp_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getCommitTimestamp()); + } + if (tiebreaker_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(5, tiebreaker_); + } + for (int i = 0; i < chunks_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, chunks_.get(i)); + } + if (done_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(8, done_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(token_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(9, token_); + } + if (lowWatermark_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(10, getLowWatermark()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof DataChange)) { + return super.equals(obj); + } + DataChange other = (DataChange) obj; + + if (type_ != other.type_) return false; + if (!getSourceClusterId().equals(other.getSourceClusterId())) return false; + if (!getRowKey().equals(other.getRowKey())) return false; + if (hasCommitTimestamp() != other.hasCommitTimestamp()) return false; + if (hasCommitTimestamp()) { + if (!getCommitTimestamp().equals(other.getCommitTimestamp())) return false; + } + if (getTiebreaker() != other.getTiebreaker()) return false; + if (!getChunksList().equals(other.getChunksList())) return false; + if (getDone() != other.getDone()) return false; + if (!getToken().equals(other.getToken())) return false; + if (hasLowWatermark() != other.hasLowWatermark()) return false; + if (hasLowWatermark()) { + if (!getLowWatermark().equals(other.getLowWatermark())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return 
memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + type_; + hash = (37 * hash) + SOURCE_CLUSTER_ID_FIELD_NUMBER; + hash = (53 * hash) + getSourceClusterId().hashCode(); + hash = (37 * hash) + ROW_KEY_FIELD_NUMBER; + hash = (53 * hash) + getRowKey().hashCode(); + if (hasCommitTimestamp()) { + hash = (37 * hash) + COMMIT_TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + getCommitTimestamp().hashCode(); + } + hash = (37 * hash) + TIEBREAKER_FIELD_NUMBER; + hash = (53 * hash) + getTiebreaker(); + if (getChunksCount() > 0) { + hash = (37 * hash) + CHUNKS_FIELD_NUMBER; + hash = (53 * hash) + getChunksList().hashCode(); + } + hash = (37 * hash) + DONE_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getDone()); + hash = (37 * hash) + TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getToken().hashCode(); + if (hasLowWatermark()) { + hash = (37 * hash) + LOW_WATERMARK_FIELD_NUMBER; + hash = (53 * hash) + getLowWatermark().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static DataChange parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static DataChange parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static DataChange parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static DataChange parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + + public static DataChange parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static DataChange parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static DataChange parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static DataChange parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static DataChange parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static DataChange parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static DataChange parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static DataChange parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return 
DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(DataChange prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * A message corresponding to one or more mutations to the partition
+     * being streamed. A single logical `DataChange` message may also be split
+     * across a sequence of multiple individual messages. Messages other than
+     * the first in a sequence will only have the `type` and `chunks` fields
+     * populated, with the final message in the sequence also containing `done`
+     * set to true.
+     * 
+ * + * Protobuf type {@code google.bigtable.v2.ReadChangeStreamResponse.DataChange} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.bigtable.v2.ReadChangeStreamResponse.DataChange) + DataChangeOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_DataChange_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_DataChange_fieldAccessorTable + .ensureFieldAccessorsInitialized(DataChange.class, Builder.class); + } + + // Construct using com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getChunksFieldBuilder(); + } + } + + @Override + public Builder clear() { + super.clear(); + type_ = 0; + + sourceClusterId_ = ""; + + rowKey_ = com.google.protobuf.ByteString.EMPTY; + + if (commitTimestampBuilder_ == null) { + commitTimestamp_ = null; + } else { + commitTimestamp_ = null; + commitTimestampBuilder_ = null; + } + tiebreaker_ = 0; + + if (chunksBuilder_ == null) { + chunks_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + chunksBuilder_.clear(); + } + done_ = false; + + token_ = ""; + + if (lowWatermarkBuilder_ == null) { + lowWatermark_ = null; + } else { + lowWatermark_ = null; + lowWatermarkBuilder_ = null; + } + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return BigtableProto + 
.internal_static_google_bigtable_v2_ReadChangeStreamResponse_DataChange_descriptor; + } + + @Override + public DataChange getDefaultInstanceForType() { + return DataChange.getDefaultInstance(); + } + + @Override + public DataChange build() { + DataChange result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public DataChange buildPartial() { + DataChange result = new DataChange(this); + int from_bitField0_ = bitField0_; + result.type_ = type_; + result.sourceClusterId_ = sourceClusterId_; + result.rowKey_ = rowKey_; + if (commitTimestampBuilder_ == null) { + result.commitTimestamp_ = commitTimestamp_; + } else { + result.commitTimestamp_ = commitTimestampBuilder_.build(); + } + result.tiebreaker_ = tiebreaker_; + if (chunksBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + chunks_ = java.util.Collections.unmodifiableList(chunks_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.chunks_ = chunks_; + } else { + result.chunks_ = chunksBuilder_.build(); + } + result.done_ = done_; + result.token_ = token_; + if (lowWatermarkBuilder_ == null) { + result.lowWatermark_ = lowWatermark_; + } else { + result.lowWatermark_ = lowWatermarkBuilder_.build(); + } + onBuilt(); + return result; + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + return super.setRepeatedField(field, index, value); 
+ } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof DataChange) { + return mergeFrom((DataChange) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(DataChange other) { + if (other == DataChange.getDefaultInstance()) return this; + if (other.type_ != 0) { + setTypeValue(other.getTypeValue()); + } + if (!other.getSourceClusterId().isEmpty()) { + sourceClusterId_ = other.sourceClusterId_; + onChanged(); + } + if (other.getRowKey() != com.google.protobuf.ByteString.EMPTY) { + setRowKey(other.getRowKey()); + } + if (other.hasCommitTimestamp()) { + mergeCommitTimestamp(other.getCommitTimestamp()); + } + if (other.getTiebreaker() != 0) { + setTiebreaker(other.getTiebreaker()); + } + if (chunksBuilder_ == null) { + if (!other.chunks_.isEmpty()) { + if (chunks_.isEmpty()) { + chunks_ = other.chunks_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureChunksIsMutable(); + chunks_.addAll(other.chunks_); + } + onChanged(); + } + } else { + if (!other.chunks_.isEmpty()) { + if (chunksBuilder_.isEmpty()) { + chunksBuilder_.dispose(); + chunksBuilder_ = null; + chunks_ = other.chunks_; + bitField0_ = (bitField0_ & ~0x00000001); + chunksBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getChunksFieldBuilder() + : null; + } else { + chunksBuilder_.addAllMessages(other.chunks_); + } + } + } + if (other.getDone() != false) { + setDone(other.getDone()); + } + if (!other.getToken().isEmpty()) { + token_ = other.token_; + onChanged(); + } + if (other.hasLowWatermark()) { + mergeLowWatermark(other.getLowWatermark()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + DataChange parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (DataChange) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private int type_ = 0; + /** + * + * + *
+       * The type of the mutation.
+       * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type type = 1; + * + * @return The enum numeric value on the wire for type. + */ + @Override + public int getTypeValue() { + return type_; + } + /** + * + * + *
+       * The type of the mutation.
+       * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type type = 1; + * + * @param value The enum numeric value on the wire for type to set. + * @return This builder for chaining. + */ + public Builder setTypeValue(int value) { + + type_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * The type of the mutation.
+       * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type type = 1; + * + * @return The type. + */ + @Override + public Type getType() { + @SuppressWarnings("deprecation") + Type result = Type.valueOf(type_); + return result == null ? Type.UNRECOGNIZED : result; + } + /** + * + * + *
+       * The type of the mutation.
+       * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type type = 1; + * + * @param value The type to set. + * @return This builder for chaining. + */ + public Builder setType(Type value) { + if (value == null) { + throw new NullPointerException(); + } + + type_ = value.getNumber(); + onChanged(); + return this; + } + /** + * + * + *
+       * The type of the mutation.
+       * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type type = 1; + * + * @return This builder for chaining. + */ + public Builder clearType() { + + type_ = 0; + onChanged(); + return this; + } + + private Object sourceClusterId_ = ""; + /** + * + * + *
+       * The cluster where the mutation was applied.
+       * Not set when `type` is `GARBAGE_COLLECTION`.
+       * 
+ * + * string source_cluster_id = 2; + * + * @return The sourceClusterId. + */ + public String getSourceClusterId() { + Object ref = sourceClusterId_; + if (!(ref instanceof String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + sourceClusterId_ = s; + return s; + } else { + return (String) ref; + } + } + /** + * + * + *
+       * The cluster where the mutation was applied.
+       * Not set when `type` is `GARBAGE_COLLECTION`.
+       * 
+ * + * string source_cluster_id = 2; + * + * @return The bytes for sourceClusterId. + */ + public com.google.protobuf.ByteString getSourceClusterIdBytes() { + Object ref = sourceClusterId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + sourceClusterId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+       * The cluster where the mutation was applied.
+       * Not set when `type` is `GARBAGE_COLLECTION`.
+       * 
+ * + * string source_cluster_id = 2; + * + * @param value The sourceClusterId to set. + * @return This builder for chaining. + */ + public Builder setSourceClusterId(String value) { + if (value == null) { + throw new NullPointerException(); + } + + sourceClusterId_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * The cluster where the mutation was applied.
+       * Not set when `type` is `GARBAGE_COLLECTION`.
+       * 
+ * + * string source_cluster_id = 2; + * + * @return This builder for chaining. + */ + public Builder clearSourceClusterId() { + + sourceClusterId_ = getDefaultInstance().getSourceClusterId(); + onChanged(); + return this; + } + /** + * + * + *
+       * The cluster where the mutation was applied.
+       * Not set when `type` is `GARBAGE_COLLECTION`.
+       * 
+ * + * string source_cluster_id = 2; + * + * @param value The bytes for sourceClusterId to set. + * @return This builder for chaining. + */ + public Builder setSourceClusterIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + sourceClusterId_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString rowKey_ = com.google.protobuf.ByteString.EMPTY; + /** + * + * + *
+       * The row key for all mutations that are part of this `DataChange`.
+       * If the `DataChange` is chunked across multiple messages, then this field
+       * will only be set for the first message.
+       * 
+ * + * bytes row_key = 3; + * + * @return The rowKey. + */ + @Override + public com.google.protobuf.ByteString getRowKey() { + return rowKey_; + } + /** + * + * + *
+       * The row key for all mutations that are part of this `DataChange`.
+       * If the `DataChange` is chunked across multiple messages, then this field
+       * will only be set for the first message.
+       * 
+ * + * bytes row_key = 3; + * + * @param value The rowKey to set. + * @return This builder for chaining. + */ + public Builder setRowKey(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + + rowKey_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * The row key for all mutations that are part of this `DataChange`.
+       * If the `DataChange` is chunked across multiple messages, then this field
+       * will only be set for the first message.
+       * 
+ * + * bytes row_key = 3; + * + * @return This builder for chaining. + */ + public Builder clearRowKey() { + + rowKey_ = getDefaultInstance().getRowKey(); + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp commitTimestamp_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + commitTimestampBuilder_; + /** + * + * + *
+       * The timestamp at which the mutation was applied on the Bigtable server.
+       * 
+ * + * .google.protobuf.Timestamp commit_timestamp = 4; + * + * @return Whether the commitTimestamp field is set. + */ + public boolean hasCommitTimestamp() { + return commitTimestampBuilder_ != null || commitTimestamp_ != null; + } + /** + * + * + *
+       * The timestamp at which the mutation was applied on the Bigtable server.
+       * 
+ * + * .google.protobuf.Timestamp commit_timestamp = 4; + * + * @return The commitTimestamp. + */ + public com.google.protobuf.Timestamp getCommitTimestamp() { + if (commitTimestampBuilder_ == null) { + return commitTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTimestamp_; + } else { + return commitTimestampBuilder_.getMessage(); + } + } + /** + * + * + *
+       * The timestamp at which the mutation was applied on the Bigtable server.
+       * 
+ * + * .google.protobuf.Timestamp commit_timestamp = 4; + */ + public Builder setCommitTimestamp(com.google.protobuf.Timestamp value) { + if (commitTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commitTimestamp_ = value; + onChanged(); + } else { + commitTimestampBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+       * The timestamp at which the mutation was applied on the Bigtable server.
+       * 
+ * + * .google.protobuf.Timestamp commit_timestamp = 4; + */ + public Builder setCommitTimestamp(com.google.protobuf.Timestamp.Builder builderForValue) { + if (commitTimestampBuilder_ == null) { + commitTimestamp_ = builderForValue.build(); + onChanged(); + } else { + commitTimestampBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+       * The timestamp at which the mutation was applied on the Bigtable server.
+       * 
+ * + * .google.protobuf.Timestamp commit_timestamp = 4; + */ + public Builder mergeCommitTimestamp(com.google.protobuf.Timestamp value) { + if (commitTimestampBuilder_ == null) { + if (commitTimestamp_ != null) { + commitTimestamp_ = + com.google.protobuf.Timestamp.newBuilder(commitTimestamp_) + .mergeFrom(value) + .buildPartial(); + } else { + commitTimestamp_ = value; + } + onChanged(); + } else { + commitTimestampBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+       * The timestamp at which the mutation was applied on the Bigtable server.
+       * 
+ * + * .google.protobuf.Timestamp commit_timestamp = 4; + */ + public Builder clearCommitTimestamp() { + if (commitTimestampBuilder_ == null) { + commitTimestamp_ = null; + onChanged(); + } else { + commitTimestamp_ = null; + commitTimestampBuilder_ = null; + } + + return this; + } + /** + * + * + *
+       * The timestamp at which the mutation was applied on the Bigtable server.
+       * 
+ * + * .google.protobuf.Timestamp commit_timestamp = 4; + */ + public com.google.protobuf.Timestamp.Builder getCommitTimestampBuilder() { + + onChanged(); + return getCommitTimestampFieldBuilder().getBuilder(); + } + /** + * + * + *
+       * The timestamp at which the mutation was applied on the Bigtable server.
+       * 
+ * + * .google.protobuf.Timestamp commit_timestamp = 4; + */ + public com.google.protobuf.TimestampOrBuilder getCommitTimestampOrBuilder() { + if (commitTimestampBuilder_ != null) { + return commitTimestampBuilder_.getMessageOrBuilder(); + } else { + return commitTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTimestamp_; + } + } + /** + * + * + *
+       * The timestamp at which the mutation was applied on the Bigtable server.
+       * 
+ * + * .google.protobuf.Timestamp commit_timestamp = 4; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getCommitTimestampFieldBuilder() { + if (commitTimestampBuilder_ == null) { + commitTimestampBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCommitTimestamp(), getParentForChildren(), isClean()); + commitTimestamp_ = null; + } + return commitTimestampBuilder_; + } + + private int tiebreaker_; + /** + * + * + *
+       * A value that lets stream consumers reconstruct Bigtable's
+       * conflict resolution semantics.
+       * https://cloud.google.com/bigtable/docs/writes#conflict-resolution
+       * In the event that the same row key, column family, column qualifier,
+       * timestamp are modified on different clusters at the same
+       * `commit_timestamp`, the mutation with the larger `tiebreaker` will be the
+       * one chosen for the eventually consistent state of the system.
+       * 
+ * + * int32 tiebreaker = 5; + * + * @return The tiebreaker. + */ + @Override + public int getTiebreaker() { + return tiebreaker_; + } + /** + * + * + *
+       * A value that lets stream consumers reconstruct Bigtable's
+       * conflict resolution semantics.
+       * https://cloud.google.com/bigtable/docs/writes#conflict-resolution
+       * In the event that the same row key, column family, column qualifier,
+       * timestamp are modified on different clusters at the same
+       * `commit_timestamp`, the mutation with the larger `tiebreaker` will be the
+       * one chosen for the eventually consistent state of the system.
+       * 
+ * + * int32 tiebreaker = 5; + * + * @param value The tiebreaker to set. + * @return This builder for chaining. + */ + public Builder setTiebreaker(int value) { + + tiebreaker_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * A value that lets stream consumers reconstruct Bigtable's
+       * conflict resolution semantics.
+       * https://cloud.google.com/bigtable/docs/writes#conflict-resolution
+       * In the event that the same row key, column family, column qualifier,
+       * timestamp are modified on different clusters at the same
+       * `commit_timestamp`, the mutation with the larger `tiebreaker` will be the
+       * one chosen for the eventually consistent state of the system.
+       * 
+ * + * int32 tiebreaker = 5; + * + * @return This builder for chaining. + */ + public Builder clearTiebreaker() { + + tiebreaker_ = 0; + onChanged(); + return this; + } + + private java.util.List chunks_ = java.util.Collections.emptyList(); + + private void ensureChunksIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + chunks_ = new java.util.ArrayList(chunks_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + MutationChunk, MutationChunk.Builder, MutationChunkOrBuilder> + chunksBuilder_; + + /** + * + * + *
+       * The mutations associated with this change to the partition.
+       * May contain complete mutations or chunks of a multi-message chunked
+       * `DataChange` record.
+       * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + * + */ + public java.util.List getChunksList() { + if (chunksBuilder_ == null) { + return java.util.Collections.unmodifiableList(chunks_); + } else { + return chunksBuilder_.getMessageList(); + } + } + /** + * + * + *
+       * The mutations associated with this change to the partition.
+       * May contain complete mutations or chunks of a multi-message chunked
+       * `DataChange` record.
+       * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + * + */ + public int getChunksCount() { + if (chunksBuilder_ == null) { + return chunks_.size(); + } else { + return chunksBuilder_.getCount(); + } + } + /** + * + * + *
+       * The mutations associated with this change to the partition.
+       * May contain complete mutations or chunks of a multi-message chunked
+       * `DataChange` record.
+       * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + * + */ + public MutationChunk getChunks(int index) { + if (chunksBuilder_ == null) { + return chunks_.get(index); + } else { + return chunksBuilder_.getMessage(index); + } + } + /** + * + * + *
+       * The mutations associated with this change to the partition.
+       * May contain complete mutations or chunks of a multi-message chunked
+       * `DataChange` record.
+       * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + * + */ + public Builder setChunks(int index, MutationChunk value) { + if (chunksBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureChunksIsMutable(); + chunks_.set(index, value); + onChanged(); + } else { + chunksBuilder_.setMessage(index, value); + } + return this; + } + /** + * + * + *
+       * The mutations associated with this change to the partition.
+       * May contain complete mutations or chunks of a multi-message chunked
+       * `DataChange` record.
+       * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + * + */ + public Builder setChunks(int index, MutationChunk.Builder builderForValue) { + if (chunksBuilder_ == null) { + ensureChunksIsMutable(); + chunks_.set(index, builderForValue.build()); + onChanged(); + } else { + chunksBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+       * The mutations associated with this change to the partition.
+       * May contain complete mutations or chunks of a multi-message chunked
+       * `DataChange` record.
+       * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + * + */ + public Builder addChunks(MutationChunk value) { + if (chunksBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureChunksIsMutable(); + chunks_.add(value); + onChanged(); + } else { + chunksBuilder_.addMessage(value); + } + return this; + } + /** + * + * + *
+       * The mutations associated with this change to the partition.
+       * May contain complete mutations or chunks of a multi-message chunked
+       * `DataChange` record.
+       * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + * + */ + public Builder addChunks(int index, MutationChunk value) { + if (chunksBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureChunksIsMutable(); + chunks_.add(index, value); + onChanged(); + } else { + chunksBuilder_.addMessage(index, value); + } + return this; + } + /** + * + * + *
+       * The mutations associated with this change to the partition.
+       * May contain complete mutations or chunks of a multi-message chunked
+       * `DataChange` record.
+       * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + * + */ + public Builder addChunks(MutationChunk.Builder builderForValue) { + if (chunksBuilder_ == null) { + ensureChunksIsMutable(); + chunks_.add(builderForValue.build()); + onChanged(); + } else { + chunksBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * + * + *
+       * The mutations associated with this change to the partition.
+       * May contain complete mutations or chunks of a multi-message chunked
+       * `DataChange` record.
+       * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + * + */ + public Builder addChunks(int index, MutationChunk.Builder builderForValue) { + if (chunksBuilder_ == null) { + ensureChunksIsMutable(); + chunks_.add(index, builderForValue.build()); + onChanged(); + } else { + chunksBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+       * The mutations associated with this change to the partition.
+       * May contain complete mutations or chunks of a multi-message chunked
+       * `DataChange` record.
+       * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + * + */ + public Builder addAllChunks(Iterable values) { + if (chunksBuilder_ == null) { + ensureChunksIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, chunks_); + onChanged(); + } else { + chunksBuilder_.addAllMessages(values); + } + return this; + } + /** + * + * + *
+       * The mutations associated with this change to the partition.
+       * May contain complete mutations or chunks of a multi-message chunked
+       * `DataChange` record.
+       * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + * + */ + public Builder clearChunks() { + if (chunksBuilder_ == null) { + chunks_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + chunksBuilder_.clear(); + } + return this; + } + /** + * + * + *
+       * The mutations associated with this change to the partition.
+       * May contain complete mutations or chunks of a multi-message chunked
+       * `DataChange` record.
+       * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + * + */ + public Builder removeChunks(int index) { + if (chunksBuilder_ == null) { + ensureChunksIsMutable(); + chunks_.remove(index); + onChanged(); + } else { + chunksBuilder_.remove(index); + } + return this; + } + /** + * + * + *
+       * The mutations associated with this change to the partition.
+       * May contain complete mutations or chunks of a multi-message chunked
+       * `DataChange` record.
+       * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + * + */ + public MutationChunk.Builder getChunksBuilder(int index) { + return getChunksFieldBuilder().getBuilder(index); + } + /** + * + * + *
+       * The mutations associated with this change to the partition.
+       * May contain complete mutations or chunks of a multi-message chunked
+       * `DataChange` record.
+       * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + * + */ + public MutationChunkOrBuilder getChunksOrBuilder(int index) { + if (chunksBuilder_ == null) { + return chunks_.get(index); + } else { + return chunksBuilder_.getMessageOrBuilder(index); + } + } + /** + * + * + *
+       * The mutations associated with this change to the partition.
+       * May contain complete mutations or chunks of a multi-message chunked
+       * `DataChange` record.
+       * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + * + */ + public java.util.List getChunksOrBuilderList() { + if (chunksBuilder_ != null) { + return chunksBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(chunks_); + } + } + /** + * + * + *
+       * The mutations associated with this change to the partition.
+       * May contain complete mutations or chunks of a multi-message chunked
+       * `DataChange` record.
+       * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + * + */ + public MutationChunk.Builder addChunksBuilder() { + return getChunksFieldBuilder().addBuilder(MutationChunk.getDefaultInstance()); + } + /** + * + * + *
+       * The mutations associated with this change to the partition.
+       * May contain complete mutations or chunks of a multi-message chunked
+       * `DataChange` record.
+       * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + * + */ + public MutationChunk.Builder addChunksBuilder(int index) { + return getChunksFieldBuilder().addBuilder(index, MutationChunk.getDefaultInstance()); + } + /** + * + * + *
+       * The mutations associated with this change to the partition.
+       * May contain complete mutations or chunks of a multi-message chunked
+       * `DataChange` record.
+       * 
+ * + * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; + * + */ + public java.util.List getChunksBuilderList() { + return getChunksFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + MutationChunk, MutationChunk.Builder, MutationChunkOrBuilder> + getChunksFieldBuilder() { + if (chunksBuilder_ == null) { + chunksBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + MutationChunk, MutationChunk.Builder, MutationChunkOrBuilder>( + chunks_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + chunks_ = null; + } + return chunksBuilder_; + } + + private boolean done_; + /** + * + * + *
+       * When true, indicates that the entire `DataChange` has been read
+       * and the client can safely process the message.
+       * 
+ * + * bool done = 8; + * + * @return The done. + */ + @Override + public boolean getDone() { + return done_; + } + /** + * + * + *
+       * When true, indicates that the entire `DataChange` has been read
+       * and the client can safely process the message.
+       * 
+ * + * bool done = 8; + * + * @param value The done to set. + * @return This builder for chaining. + */ + public Builder setDone(boolean value) { + + done_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * When true, indicates that the entire `DataChange` has been read
+       * and the client can safely process the message.
+       * 
+ * + * bool done = 8; + * + * @return This builder for chaining. + */ + public Builder clearDone() { + + done_ = false; + onChanged(); + return this; + } + + private Object token_ = ""; + /** + * + * + *
+       * An encoded position for this stream's partition to restart reading from.
+       * This token is for the StreamPartition from the request.
+       * 
+ * + * string token = 9; + * + * @return The token. + */ + public String getToken() { + Object ref = token_; + if (!(ref instanceof String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + token_ = s; + return s; + } else { + return (String) ref; + } + } + /** + * + * + *
+       * An encoded position for this stream's partition to restart reading from.
+       * This token is for the StreamPartition from the request.
+       * 
+ * + * string token = 9; + * + * @return The bytes for token. + */ + public com.google.protobuf.ByteString getTokenBytes() { + Object ref = token_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + token_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+       * An encoded position for this stream's partition to restart reading from.
+       * This token is for the StreamPartition from the request.
+       * 
+ * + * string token = 9; + * + * @param value The token to set. + * @return This builder for chaining. + */ + public Builder setToken(String value) { + if (value == null) { + throw new NullPointerException(); + } + + token_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * An encoded position for this stream's partition to restart reading from.
+       * This token is for the StreamPartition from the request.
+       * 
+ * + * string token = 9; + * + * @return This builder for chaining. + */ + public Builder clearToken() { + + token_ = getDefaultInstance().getToken(); + onChanged(); + return this; + } + /** + * + * + *
+       * An encoded position for this stream's partition to restart reading from.
+       * This token is for the StreamPartition from the request.
+       * 
+ * + * string token = 9; + * + * @param value The bytes for token to set. + * @return This builder for chaining. + */ + public Builder setTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + token_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp lowWatermark_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + lowWatermarkBuilder_; + /** + * + * + *
+       * A commit timestamp that is lower than or equal to any timestamp for a
+       * record that will be delivered in the future on the stream. For an example
+       * usage see https://beam.apache.org/documentation/basics/#watermarks
+       * 
+ * + * .google.protobuf.Timestamp low_watermark = 10; + * + * @return Whether the lowWatermark field is set. + */ + public boolean hasLowWatermark() { + return lowWatermarkBuilder_ != null || lowWatermark_ != null; + } + /** + * + * + *
+       * A commit timestamp that is lower than or equal to any timestamp for a
+       * record that will be delivered in the future on the stream. For an example
+       * usage see https://beam.apache.org/documentation/basics/#watermarks
+       * 
+ * + * .google.protobuf.Timestamp low_watermark = 10; + * + * @return The lowWatermark. + */ + public com.google.protobuf.Timestamp getLowWatermark() { + if (lowWatermarkBuilder_ == null) { + return lowWatermark_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : lowWatermark_; + } else { + return lowWatermarkBuilder_.getMessage(); + } + } + /** + * + * + *
+       * A commit timestamp that is lower than or equal to any timestamp for a
+       * record that will be delivered in the future on the stream. For an example
+       * usage see https://beam.apache.org/documentation/basics/#watermarks
+       * 
+ * + * .google.protobuf.Timestamp low_watermark = 10; + */ + public Builder setLowWatermark(com.google.protobuf.Timestamp value) { + if (lowWatermarkBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + lowWatermark_ = value; + onChanged(); + } else { + lowWatermarkBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+       * A commit timestamp that is lower than or equal to any timestamp for a
+       * record that will be delivered in the future on the stream. For an example
+       * usage see https://beam.apache.org/documentation/basics/#watermarks
+       * 
+ * + * .google.protobuf.Timestamp low_watermark = 10; + */ + public Builder setLowWatermark(com.google.protobuf.Timestamp.Builder builderForValue) { + if (lowWatermarkBuilder_ == null) { + lowWatermark_ = builderForValue.build(); + onChanged(); + } else { + lowWatermarkBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+       * A commit timestamp that is lower than or equal to any timestamp for a
+       * record that will be delivered in the future on the stream. For an example
+       * usage see https://beam.apache.org/documentation/basics/#watermarks
+       * 
+ * + * .google.protobuf.Timestamp low_watermark = 10; + */ + public Builder mergeLowWatermark(com.google.protobuf.Timestamp value) { + if (lowWatermarkBuilder_ == null) { + if (lowWatermark_ != null) { + lowWatermark_ = + com.google.protobuf.Timestamp.newBuilder(lowWatermark_) + .mergeFrom(value) + .buildPartial(); + } else { + lowWatermark_ = value; + } + onChanged(); + } else { + lowWatermarkBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+       * A commit timestamp that is lower than or equal to any timestamp for a
+       * record that will be delivered in the future on the stream. For an example
+       * usage see https://beam.apache.org/documentation/basics/#watermarks
+       * 
+ * + * .google.protobuf.Timestamp low_watermark = 10; + */ + public Builder clearLowWatermark() { + if (lowWatermarkBuilder_ == null) { + lowWatermark_ = null; + onChanged(); + } else { + lowWatermark_ = null; + lowWatermarkBuilder_ = null; + } + + return this; + } + /** + * + * + *
+       * A commit timestamp that is lower than or equal to any timestamp for a
+       * record that will be delivered in the future on the stream. For an example
+       * usage see https://beam.apache.org/documentation/basics/#watermarks
+       * 
+ * + * .google.protobuf.Timestamp low_watermark = 10; + */ + public com.google.protobuf.Timestamp.Builder getLowWatermarkBuilder() { + + onChanged(); + return getLowWatermarkFieldBuilder().getBuilder(); + } + /** + * + * + *
+       * A commit timestamp that is lower than or equal to any timestamp for a
+       * record that will be delivered in the future on the stream. For an example
+       * usage see https://beam.apache.org/documentation/basics/#watermarks
+       * 
+ * + * .google.protobuf.Timestamp low_watermark = 10; + */ + public com.google.protobuf.TimestampOrBuilder getLowWatermarkOrBuilder() { + if (lowWatermarkBuilder_ != null) { + return lowWatermarkBuilder_.getMessageOrBuilder(); + } else { + return lowWatermark_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : lowWatermark_; + } + } + /** + * + * + *
+       * A commit timestamp that is lower than or equal to any timestamp for a
+       * record that will be delivered in the future on the stream. For an example
+       * usage see https://beam.apache.org/documentation/basics/#watermarks
+       * 
+ * + * .google.protobuf.Timestamp low_watermark = 10; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getLowWatermarkFieldBuilder() { + if (lowWatermarkBuilder_ == null) { + lowWatermarkBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getLowWatermark(), getParentForChildren(), isClean()); + lowWatermark_ = null; + } + return lowWatermarkBuilder_; + } + + @Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.bigtable.v2.ReadChangeStreamResponse.DataChange) + } + + // @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadChangeStreamResponse.DataChange) + private static final DataChange DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new DataChange(); + } + + public static DataChange getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public DataChange parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DataChange(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public DataChange getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + 
} + + public interface HeartbeatOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.bigtable.v2.ReadChangeStreamResponse.Heartbeat) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * A token that can be provided to a subsequent `ReadChangeStream` call
+     * to pick up reading at the current stream position.
+     * 
+ * + * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; + * + * @return Whether the continuationToken field is set. + */ + boolean hasContinuationToken(); + /** + * + * + *
+     * A token that can be provided to a subsequent `ReadChangeStream` call
+     * to pick up reading at the current stream position.
+     * 
+ * + * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; + * + * @return The continuationToken. + */ + StreamContinuationToken getContinuationToken(); + /** + * + * + *
+     * A token that can be provided to a subsequent `ReadChangeStream` call
+     * to pick up reading at the current stream position.
+     * 
+ * + * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; + */ + StreamContinuationTokenOrBuilder getContinuationTokenOrBuilder(); + + /** + * + * + *
+     * A commit timestamp that is lower than or equal to any timestamp for a
+     * record that will be delivered in the future on the stream. For an example
+     * usage see https://beam.apache.org/documentation/basics/#watermarks
+     * 
+ * + * .google.protobuf.Timestamp low_watermark = 2; + * + * @return Whether the lowWatermark field is set. + */ + boolean hasLowWatermark(); + /** + * + * + *
+     * A commit timestamp that is lower than or equal to any timestamp for a
+     * record that will be delivered in the future on the stream. For an example
+     * usage see https://beam.apache.org/documentation/basics/#watermarks
+     * 
+ * + * .google.protobuf.Timestamp low_watermark = 2; + * + * @return The lowWatermark. + */ + com.google.protobuf.Timestamp getLowWatermark(); + /** + * + * + *
+     * A commit timestamp that is lower than or equal to any timestamp for a
+     * record that will be delivered in the future on the stream. For an example
+     * usage see https://beam.apache.org/documentation/basics/#watermarks
+     * 
+ * + * .google.protobuf.Timestamp low_watermark = 2; + */ + com.google.protobuf.TimestampOrBuilder getLowWatermarkOrBuilder(); + } + /** + * + * + *
+   * A periodic message with information that can be used to checkpoint
+   * the state of a stream.
+   * 
+ * + * Protobuf type {@code google.bigtable.v2.ReadChangeStreamResponse.Heartbeat} + */ + public static final class Heartbeat extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.bigtable.v2.ReadChangeStreamResponse.Heartbeat) + HeartbeatOrBuilder { + private static final long serialVersionUID = 0L; + // Use Heartbeat.newBuilder() to construct. + private Heartbeat(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private Heartbeat() {} + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new Heartbeat(); + } + + @Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private Heartbeat( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + StreamContinuationToken.Builder subBuilder = null; + if (continuationToken_ != null) { + subBuilder = continuationToken_.toBuilder(); + } + continuationToken_ = + input.readMessage(StreamContinuationToken.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(continuationToken_); + continuationToken_ = subBuilder.buildPartial(); + } + + break; + } + case 18: + { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (lowWatermark_ != null) { + subBuilder = lowWatermark_.toBuilder(); + } + lowWatermark_ = + input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + 
subBuilder.mergeFrom(lowWatermark_); + lowWatermark_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_Heartbeat_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_Heartbeat_fieldAccessorTable + .ensureFieldAccessorsInitialized(Heartbeat.class, Builder.class); + } + + public static final int CONTINUATION_TOKEN_FIELD_NUMBER = 1; + private StreamContinuationToken continuationToken_; + /** + * + * + *
+     * A token that can be provided to a subsequent `ReadChangeStream` call
+     * to pick up reading at the current stream position.
+     * 
+ * + * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; + * + * @return Whether the continuationToken field is set. + */ + @Override + public boolean hasContinuationToken() { + return continuationToken_ != null; + } + /** + * + * + *
+     * A token that can be provided to a subsequent `ReadChangeStream` call
+     * to pick up reading at the current stream position.
+     * 
+ * + * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; + * + * @return The continuationToken. + */ + @Override + public StreamContinuationToken getContinuationToken() { + return continuationToken_ == null + ? StreamContinuationToken.getDefaultInstance() + : continuationToken_; + } + /** + * + * + *
+     * A token that can be provided to a subsequent `ReadChangeStream` call
+     * to pick up reading at the current stream position.
+     * 
+ * + * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; + */ + @Override + public StreamContinuationTokenOrBuilder getContinuationTokenOrBuilder() { + return getContinuationToken(); + } + + public static final int LOW_WATERMARK_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp lowWatermark_; + /** + * + * + *
+     * A commit timestamp that is lower than or equal to any timestamp for a
+     * record that will be delivered in the future on the stream. For an example
+     * usage see https://beam.apache.org/documentation/basics/#watermarks
+     * 
+ * + * .google.protobuf.Timestamp low_watermark = 2; + * + * @return Whether the lowWatermark field is set. + */ + @Override + public boolean hasLowWatermark() { + return lowWatermark_ != null; + } + /** + * + * + *
+     * A commit timestamp that is lower than or equal to any timestamp for a
+     * record that will be delivered in the future on the stream. For an example
+     * usage see https://beam.apache.org/documentation/basics/#watermarks
+     * 
+ * + * .google.protobuf.Timestamp low_watermark = 2; + * + * @return The lowWatermark. + */ + @Override + public com.google.protobuf.Timestamp getLowWatermark() { + return lowWatermark_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : lowWatermark_; + } + /** + * + * + *
+     * A commit timestamp that is lower than or equal to any timestamp for a
+     * record that will be delivered in the future on the stream. For an example
+     * usage see https://beam.apache.org/documentation/basics/#watermarks
+     * 
+ * + * .google.protobuf.Timestamp low_watermark = 2; + */ + @Override + public com.google.protobuf.TimestampOrBuilder getLowWatermarkOrBuilder() { + return getLowWatermark(); + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (continuationToken_ != null) { + output.writeMessage(1, getContinuationToken()); + } + if (lowWatermark_ != null) { + output.writeMessage(2, getLowWatermark()); + } + unknownFields.writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (continuationToken_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getContinuationToken()); + } + if (lowWatermark_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getLowWatermark()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof Heartbeat)) { + return super.equals(obj); + } + Heartbeat other = (Heartbeat) obj; + + if (hasContinuationToken() != other.hasContinuationToken()) return false; + if (hasContinuationToken()) { + if (!getContinuationToken().equals(other.getContinuationToken())) return false; + } + if (hasLowWatermark() != other.hasLowWatermark()) return false; + if (hasLowWatermark()) { + if (!getLowWatermark().equals(other.getLowWatermark())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + 
int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasContinuationToken()) { + hash = (37 * hash) + CONTINUATION_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getContinuationToken().hashCode(); + } + if (hasLowWatermark()) { + hash = (37 * hash) + LOW_WATERMARK_FIELD_NUMBER; + hash = (53 * hash) + getLowWatermark().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static Heartbeat parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Heartbeat parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Heartbeat parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Heartbeat parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Heartbeat parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Heartbeat parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Heartbeat parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Heartbeat parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static Heartbeat parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static Heartbeat parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static Heartbeat parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static Heartbeat parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(Heartbeat prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * A periodic message with information that can be used to checkpoint
+     * the state of a stream.
+     * 
+ * + * Protobuf type {@code google.bigtable.v2.ReadChangeStreamResponse.Heartbeat} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.bigtable.v2.ReadChangeStreamResponse.Heartbeat) + HeartbeatOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_Heartbeat_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_Heartbeat_fieldAccessorTable + .ensureFieldAccessorsInitialized(Heartbeat.class, Builder.class); + } + + // Construct using com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @Override + public Builder clear() { + super.clear(); + if (continuationTokenBuilder_ == null) { + continuationToken_ = null; + } else { + continuationToken_ = null; + continuationTokenBuilder_ = null; + } + if (lowWatermarkBuilder_ == null) { + lowWatermark_ = null; + } else { + lowWatermark_ = null; + lowWatermarkBuilder_ = null; + } + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_Heartbeat_descriptor; + } + + @Override + public Heartbeat getDefaultInstanceForType() { + return Heartbeat.getDefaultInstance(); + } + + @Override + public Heartbeat build() { + Heartbeat result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + @Override + public Heartbeat buildPartial() { + Heartbeat result = new Heartbeat(this); + if (continuationTokenBuilder_ == null) { + result.continuationToken_ = continuationToken_; + } else { + result.continuationToken_ = continuationTokenBuilder_.build(); + } + if (lowWatermarkBuilder_ == null) { + result.lowWatermark_ = lowWatermark_; + } else { + result.lowWatermark_ = lowWatermarkBuilder_.build(); + } + onBuilt(); + return result; + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof Heartbeat) { + return mergeFrom((Heartbeat) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(Heartbeat other) { + if (other == Heartbeat.getDefaultInstance()) return this; + if (other.hasContinuationToken()) { + mergeContinuationToken(other.getContinuationToken()); + } + if (other.hasLowWatermark()) { + mergeLowWatermark(other.getLowWatermark()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @Override + public final boolean 
isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Heartbeat parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (Heartbeat) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private StreamContinuationToken continuationToken_; + private com.google.protobuf.SingleFieldBuilderV3< + StreamContinuationToken, + StreamContinuationToken.Builder, + StreamContinuationTokenOrBuilder> + continuationTokenBuilder_; + /** + * + * + *
+       * A token that can be provided to a subsequent `ReadChangeStream` call
+       * to pick up reading at the current stream position.
+       * 
+ * + * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; + * + * @return Whether the continuationToken field is set. + */ + public boolean hasContinuationToken() { + return continuationTokenBuilder_ != null || continuationToken_ != null; + } + /** + * + * + *
+       * A token that can be provided to a subsequent `ReadChangeStream` call
+       * to pick up reading at the current stream position.
+       * 
+ * + * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; + * + * @return The continuationToken. + */ + public StreamContinuationToken getContinuationToken() { + if (continuationTokenBuilder_ == null) { + return continuationToken_ == null + ? StreamContinuationToken.getDefaultInstance() + : continuationToken_; + } else { + return continuationTokenBuilder_.getMessage(); + } + } + /** + * + * + *
+       * A token that can be provided to a subsequent `ReadChangeStream` call
+       * to pick up reading at the current stream position.
+       * 
+ * + * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; + */ + public Builder setContinuationToken(StreamContinuationToken value) { + if (continuationTokenBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + continuationToken_ = value; + onChanged(); + } else { + continuationTokenBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+       * A token that can be provided to a subsequent `ReadChangeStream` call
+       * to pick up reading at the current stream position.
+       * 
+ * + * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; + */ + public Builder setContinuationToken(StreamContinuationToken.Builder builderForValue) { + if (continuationTokenBuilder_ == null) { + continuationToken_ = builderForValue.build(); + onChanged(); + } else { + continuationTokenBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+       * A token that can be provided to a subsequent `ReadChangeStream` call
+       * to pick up reading at the current stream position.
+       * 
+ * + * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; + */ + public Builder mergeContinuationToken(StreamContinuationToken value) { + if (continuationTokenBuilder_ == null) { + if (continuationToken_ != null) { + continuationToken_ = + StreamContinuationToken.newBuilder(continuationToken_) + .mergeFrom(value) + .buildPartial(); + } else { + continuationToken_ = value; + } + onChanged(); + } else { + continuationTokenBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+       * A token that can be provided to a subsequent `ReadChangeStream` call
+       * to pick up reading at the current stream position.
+       * 
+ * + * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; + */ + public Builder clearContinuationToken() { + if (continuationTokenBuilder_ == null) { + continuationToken_ = null; + onChanged(); + } else { + continuationToken_ = null; + continuationTokenBuilder_ = null; + } + + return this; + } + /** + * + * + *
+       * A token that can be provided to a subsequent `ReadChangeStream` call
+       * to pick up reading at the current stream position.
+       * 
+ * + * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; + */ + public StreamContinuationToken.Builder getContinuationTokenBuilder() { + + onChanged(); + return getContinuationTokenFieldBuilder().getBuilder(); + } + /** + * + * + *
+       * A token that can be provided to a subsequent `ReadChangeStream` call
+       * to pick up reading at the current stream position.
+       * 
+ * + * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; + */ + public StreamContinuationTokenOrBuilder getContinuationTokenOrBuilder() { + if (continuationTokenBuilder_ != null) { + return continuationTokenBuilder_.getMessageOrBuilder(); + } else { + return continuationToken_ == null + ? StreamContinuationToken.getDefaultInstance() + : continuationToken_; + } + } + /** + * + * + *
+       * A token that can be provided to a subsequent `ReadChangeStream` call
+       * to pick up reading at the current stream position.
+       * 
+ * + * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + StreamContinuationToken, + StreamContinuationToken.Builder, + StreamContinuationTokenOrBuilder> + getContinuationTokenFieldBuilder() { + if (continuationTokenBuilder_ == null) { + continuationTokenBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + StreamContinuationToken, + StreamContinuationToken.Builder, + StreamContinuationTokenOrBuilder>( + getContinuationToken(), getParentForChildren(), isClean()); + continuationToken_ = null; + } + return continuationTokenBuilder_; + } + + private com.google.protobuf.Timestamp lowWatermark_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + lowWatermarkBuilder_; + /** + * + * + *
+       * A commit timestamp that is lower than or equal to any timestamp for a
+       * record that will be delivered in the future on the stream. For an example
+       * usage see https://beam.apache.org/documentation/basics/#watermarks
+       * 
+ * + * .google.protobuf.Timestamp low_watermark = 2; + * + * @return Whether the lowWatermark field is set. + */ + public boolean hasLowWatermark() { + return lowWatermarkBuilder_ != null || lowWatermark_ != null; + } + /** + * + * + *
+       * A commit timestamp that is lower than or equal to any timestamp for a
+       * record that will be delivered in the future on the stream. For an example
+       * usage see https://beam.apache.org/documentation/basics/#watermarks
+       * 
+ * + * .google.protobuf.Timestamp low_watermark = 2; + * + * @return The lowWatermark. + */ + public com.google.protobuf.Timestamp getLowWatermark() { + if (lowWatermarkBuilder_ == null) { + return lowWatermark_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : lowWatermark_; + } else { + return lowWatermarkBuilder_.getMessage(); + } + } + /** + * + * + *
+       * A commit timestamp that is lower than or equal to any timestamp for a
+       * record that will be delivered in the future on the stream. For an example
+       * usage see https://beam.apache.org/documentation/basics/#watermarks
+       * 
+ * + * .google.protobuf.Timestamp low_watermark = 2; + */ + public Builder setLowWatermark(com.google.protobuf.Timestamp value) { + if (lowWatermarkBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + lowWatermark_ = value; + onChanged(); + } else { + lowWatermarkBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+       * A commit timestamp that is lower than or equal to any timestamp for a
+       * record that will be delivered in the future on the stream. For an example
+       * usage see https://beam.apache.org/documentation/basics/#watermarks
+       * 
+ * + * .google.protobuf.Timestamp low_watermark = 2; + */ + public Builder setLowWatermark(com.google.protobuf.Timestamp.Builder builderForValue) { + if (lowWatermarkBuilder_ == null) { + lowWatermark_ = builderForValue.build(); + onChanged(); + } else { + lowWatermarkBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+       * A commit timestamp that is lower than or equal to any timestamp for a
+       * record that will be delivered in the future on the stream. For an example
+       * usage see https://beam.apache.org/documentation/basics/#watermarks
+       * 
+ * + * .google.protobuf.Timestamp low_watermark = 2; + */ + public Builder mergeLowWatermark(com.google.protobuf.Timestamp value) { + if (lowWatermarkBuilder_ == null) { + if (lowWatermark_ != null) { + lowWatermark_ = + com.google.protobuf.Timestamp.newBuilder(lowWatermark_) + .mergeFrom(value) + .buildPartial(); + } else { + lowWatermark_ = value; + } + onChanged(); + } else { + lowWatermarkBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+       * A commit timestamp that is lower than or equal to any timestamp for a
+       * record that will be delivered in the future on the stream. For an example
+       * usage see https://beam.apache.org/documentation/basics/#watermarks
+       * 
+ * + * .google.protobuf.Timestamp low_watermark = 2; + */ + public Builder clearLowWatermark() { + if (lowWatermarkBuilder_ == null) { + lowWatermark_ = null; + onChanged(); + } else { + lowWatermark_ = null; + lowWatermarkBuilder_ = null; + } + + return this; + } + /** + * + * + *
+       * A commit timestamp that is lower than or equal to any timestamp for a
+       * record that will be delivered in the future on the stream. For an example
+       * usage see https://beam.apache.org/documentation/basics/#watermarks
+       * 
+ * + * .google.protobuf.Timestamp low_watermark = 2; + */ + public com.google.protobuf.Timestamp.Builder getLowWatermarkBuilder() { + + onChanged(); + return getLowWatermarkFieldBuilder().getBuilder(); + } + /** + * + * + *
+       * A commit timestamp that is lower than or equal to any timestamp for a
+       * record that will be delivered in the future on the stream. For an example
+       * usage see https://beam.apache.org/documentation/basics/#watermarks
+       * 
+ * + * .google.protobuf.Timestamp low_watermark = 2; + */ + public com.google.protobuf.TimestampOrBuilder getLowWatermarkOrBuilder() { + if (lowWatermarkBuilder_ != null) { + return lowWatermarkBuilder_.getMessageOrBuilder(); + } else { + return lowWatermark_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : lowWatermark_; + } + } + /** + * + * + *
+       * A commit timestamp that is lower than or equal to any timestamp for a
+       * record that will be delivered in the future on the stream. For an example
+       * usage see https://beam.apache.org/documentation/basics/#watermarks
+       * 
+ * + * .google.protobuf.Timestamp low_watermark = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getLowWatermarkFieldBuilder() { + if (lowWatermarkBuilder_ == null) { + lowWatermarkBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getLowWatermark(), getParentForChildren(), isClean()); + lowWatermark_ = null; + } + return lowWatermarkBuilder_; + } + + @Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.bigtable.v2.ReadChangeStreamResponse.Heartbeat) + } + + // @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadChangeStreamResponse.Heartbeat) + private static final Heartbeat DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new Heartbeat(); + } + + public static Heartbeat getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public Heartbeat parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Heartbeat(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public Heartbeat getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + 
public interface CloseStreamOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.bigtable.v2.ReadChangeStreamResponse.CloseStream) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * The status of the stream.
+     * 
+ * + * .google.rpc.Status status = 1; + * + * @return Whether the status field is set. + */ + boolean hasStatus(); + /** + * + * + *
+     * The status of the stream.
+     * 
+ * + * .google.rpc.Status status = 1; + * + * @return The status. + */ + com.google.rpc.Status getStatus(); + /** + * + * + *
+     * The status of the stream.
+     * 
+ * + * .google.rpc.Status status = 1; + */ + com.google.rpc.StatusOrBuilder getStatusOrBuilder(); + + /** + * + * + *
+     * If non-empty, contains the information needed to start reading the new
+     * partition(s) that contain segments of this partition's row range.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + java.util.List getContinuationTokensList(); + /** + * + * + *
+     * If non-empty, contains the information needed to start reading the new
+     * partition(s) that contain segments of this partition's row range.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + StreamContinuationToken getContinuationTokens(int index); + /** + * + * + *
+     * If non-empty, contains the information needed to start reading the new
+     * partition(s) that contain segments of this partition's row range.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + int getContinuationTokensCount(); + /** + * + * + *
+     * If non-empty, contains the information needed to start reading the new
+     * partition(s) that contain segments of this partition's row range.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + java.util.List getContinuationTokensOrBuilderList(); + /** + * + * + *
+     * If non-empty, contains the information needed to start reading the new
+     * partition(s) that contain segments of this partition's row range.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + StreamContinuationTokenOrBuilder getContinuationTokensOrBuilder(int index); + } + /** + * + * + *
+   * A message indicating that the client should stop reading from the stream.
+   * If status is OK and `continuation_tokens` is empty, the stream has finished
+   * (for example if there was an `end_time` specified).
+   * If `continuation_tokens` is present, then a change in partitioning requires
+   * the client to open a new stream for each token to resume reading.
+   * 
+ * + * Protobuf type {@code google.bigtable.v2.ReadChangeStreamResponse.CloseStream} + */ + public static final class CloseStream extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.bigtable.v2.ReadChangeStreamResponse.CloseStream) + CloseStreamOrBuilder { + private static final long serialVersionUID = 0L; + // Use CloseStream.newBuilder() to construct. + private CloseStream(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private CloseStream() { + continuationTokens_ = java.util.Collections.emptyList(); + } + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new CloseStream(); + } + + @Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private CloseStream( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.rpc.Status.Builder subBuilder = null; + if (status_ != null) { + subBuilder = status_.toBuilder(); + } + status_ = input.readMessage(com.google.rpc.Status.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(status_); + status_ = subBuilder.buildPartial(); + } + + break; + } + case 18: + { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + continuationTokens_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + continuationTokens_.add( + input.readMessage(StreamContinuationToken.parser(), 
extensionRegistry)); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + continuationTokens_ = java.util.Collections.unmodifiableList(continuationTokens_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_CloseStream_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_CloseStream_fieldAccessorTable + .ensureFieldAccessorsInitialized(CloseStream.class, Builder.class); + } + + public static final int STATUS_FIELD_NUMBER = 1; + private com.google.rpc.Status status_; + /** + * + * + *
+     * The status of the stream.
+     * 
+ * + * .google.rpc.Status status = 1; + * + * @return Whether the status field is set. + */ + @Override + public boolean hasStatus() { + return status_ != null; + } + /** + * + * + *
+     * The status of the stream.
+     * 
+ * + * .google.rpc.Status status = 1; + * + * @return The status. + */ + @Override + public com.google.rpc.Status getStatus() { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } + /** + * + * + *
+     * The status of the stream.
+     * 
+ * + * .google.rpc.Status status = 1; + */ + @Override + public com.google.rpc.StatusOrBuilder getStatusOrBuilder() { + return getStatus(); + } + + public static final int CONTINUATION_TOKENS_FIELD_NUMBER = 2; + private java.util.List continuationTokens_; + /** + * + * + *
+     * If non-empty, contains the information needed to start reading the new
+     * partition(s) that contain segments of this partition's row range.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + @Override + public java.util.List getContinuationTokensList() { + return continuationTokens_; + } + /** + * + * + *
+     * If non-empty, contains the information needed to start reading the new
+     * partition(s) that contain segments of this partition's row range.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + @Override + public java.util.List + getContinuationTokensOrBuilderList() { + return continuationTokens_; + } + /** + * + * + *
+     * If non-empty, contains the information needed to start reading the new
+     * partition(s) that contain segments of this partition's row range.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + @Override + public int getContinuationTokensCount() { + return continuationTokens_.size(); + } + /** + * + * + *
+     * If non-empty, contains the information needed to start reading the new
+     * partition(s) that contain segments of this partition's row range.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + @Override + public StreamContinuationToken getContinuationTokens(int index) { + return continuationTokens_.get(index); + } + /** + * + * + *
+     * If non-empty, contains the information needed to start reading the new
+     * partition(s) that contain segments of this partition's row range.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + @Override + public StreamContinuationTokenOrBuilder getContinuationTokensOrBuilder(int index) { + return continuationTokens_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (status_ != null) { + output.writeMessage(1, getStatus()); + } + for (int i = 0; i < continuationTokens_.size(); i++) { + output.writeMessage(2, continuationTokens_.get(i)); + } + unknownFields.writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (status_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getStatus()); + } + for (int i = 0; i < continuationTokens_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(2, continuationTokens_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof CloseStream)) { + return super.equals(obj); + } + CloseStream other = (CloseStream) obj; + + if (hasStatus() != other.hasStatus()) return false; + if (hasStatus()) { + if (!getStatus().equals(other.getStatus())) return false; + } + if (!getContinuationTokensList().equals(other.getContinuationTokensList())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + 
getDescriptor().hashCode(); + if (hasStatus()) { + hash = (37 * hash) + STATUS_FIELD_NUMBER; + hash = (53 * hash) + getStatus().hashCode(); + } + if (getContinuationTokensCount() > 0) { + hash = (37 * hash) + CONTINUATION_TOKENS_FIELD_NUMBER; + hash = (53 * hash) + getContinuationTokensList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static CloseStream parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static CloseStream parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static CloseStream parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static CloseStream parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static CloseStream parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static CloseStream parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static CloseStream parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static CloseStream parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static CloseStream parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static CloseStream parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static CloseStream parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static CloseStream parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(CloseStream prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * A message indicating that the client should stop reading from the stream.
+     * If status is OK and `continuation_tokens` is empty, the stream has finished
+     * (for example if there was an `end_time` specified).
+     * If `continuation_tokens` is present, then a change in partitioning requires
+     * the client to open a new stream for each token to resume reading.
+     * 
+ * + * Protobuf type {@code google.bigtable.v2.ReadChangeStreamResponse.CloseStream} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.bigtable.v2.ReadChangeStreamResponse.CloseStream) + CloseStreamOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_CloseStream_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_CloseStream_fieldAccessorTable + .ensureFieldAccessorsInitialized(CloseStream.class, Builder.class); + } + + // Construct using com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getContinuationTokensFieldBuilder(); + } + } + + @Override + public Builder clear() { + super.clear(); + if (statusBuilder_ == null) { + status_ = null; + } else { + status_ = null; + statusBuilder_ = null; + } + if (continuationTokensBuilder_ == null) { + continuationTokens_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + continuationTokensBuilder_.clear(); + } + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_CloseStream_descriptor; + } + + @Override + public CloseStream getDefaultInstanceForType() { + return CloseStream.getDefaultInstance(); + } + + @Override + public CloseStream build() { + CloseStream result = 
buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public CloseStream buildPartial() { + CloseStream result = new CloseStream(this); + int from_bitField0_ = bitField0_; + if (statusBuilder_ == null) { + result.status_ = status_; + } else { + result.status_ = statusBuilder_.build(); + } + if (continuationTokensBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + continuationTokens_ = java.util.Collections.unmodifiableList(continuationTokens_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.continuationTokens_ = continuationTokens_; + } else { + result.continuationTokens_ = continuationTokensBuilder_.build(); + } + onBuilt(); + return result; + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof CloseStream) { + return mergeFrom((CloseStream) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(CloseStream other) { + if (other == CloseStream.getDefaultInstance()) return this; + if (other.hasStatus()) { + 
mergeStatus(other.getStatus()); + } + if (continuationTokensBuilder_ == null) { + if (!other.continuationTokens_.isEmpty()) { + if (continuationTokens_.isEmpty()) { + continuationTokens_ = other.continuationTokens_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureContinuationTokensIsMutable(); + continuationTokens_.addAll(other.continuationTokens_); + } + onChanged(); + } + } else { + if (!other.continuationTokens_.isEmpty()) { + if (continuationTokensBuilder_.isEmpty()) { + continuationTokensBuilder_.dispose(); + continuationTokensBuilder_ = null; + continuationTokens_ = other.continuationTokens_; + bitField0_ = (bitField0_ & ~0x00000001); + continuationTokensBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getContinuationTokensFieldBuilder() + : null; + } else { + continuationTokensBuilder_.addAllMessages(other.continuationTokens_); + } + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + CloseStream parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (CloseStream) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private com.google.rpc.Status status_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + statusBuilder_; + /** + * + * + *
+       * The status of the stream.
+       * 
+ * + * .google.rpc.Status status = 1; + * + * @return Whether the status field is set. + */ + public boolean hasStatus() { + return statusBuilder_ != null || status_ != null; + } + /** + * + * + *
+       * The status of the stream.
+       * 
+ * + * .google.rpc.Status status = 1; + * + * @return The status. + */ + public com.google.rpc.Status getStatus() { + if (statusBuilder_ == null) { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } else { + return statusBuilder_.getMessage(); + } + } + /** + * + * + *
+       * The status of the stream.
+       * 
+ * + * .google.rpc.Status status = 1; + */ + public Builder setStatus(com.google.rpc.Status value) { + if (statusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + status_ = value; + onChanged(); + } else { + statusBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+       * The status of the stream.
+       * 
+ * + * .google.rpc.Status status = 1; + */ + public Builder setStatus(com.google.rpc.Status.Builder builderForValue) { + if (statusBuilder_ == null) { + status_ = builderForValue.build(); + onChanged(); + } else { + statusBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+       * The status of the stream.
+       * 
+ * + * .google.rpc.Status status = 1; + */ + public Builder mergeStatus(com.google.rpc.Status value) { + if (statusBuilder_ == null) { + if (status_ != null) { + status_ = com.google.rpc.Status.newBuilder(status_).mergeFrom(value).buildPartial(); + } else { + status_ = value; + } + onChanged(); + } else { + statusBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+       * The status of the stream.
+       * 
+ * + * .google.rpc.Status status = 1; + */ + public Builder clearStatus() { + if (statusBuilder_ == null) { + status_ = null; + onChanged(); + } else { + status_ = null; + statusBuilder_ = null; + } + + return this; + } + /** + * + * + *
+       * The status of the stream.
+       * 
+ * + * .google.rpc.Status status = 1; + */ + public com.google.rpc.Status.Builder getStatusBuilder() { + + onChanged(); + return getStatusFieldBuilder().getBuilder(); + } + /** + * + * + *
+       * The status of the stream.
+       * 
+ * + * .google.rpc.Status status = 1; + */ + public com.google.rpc.StatusOrBuilder getStatusOrBuilder() { + if (statusBuilder_ != null) { + return statusBuilder_.getMessageOrBuilder(); + } else { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } + } + /** + * + * + *
+       * The status of the stream.
+       * 
+ * + * .google.rpc.Status status = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + getStatusFieldBuilder() { + if (statusBuilder_ == null) { + statusBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.rpc.Status, + com.google.rpc.Status.Builder, + com.google.rpc.StatusOrBuilder>(getStatus(), getParentForChildren(), isClean()); + status_ = null; + } + return statusBuilder_; + } + + private java.util.List continuationTokens_ = + java.util.Collections.emptyList(); + + private void ensureContinuationTokensIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + continuationTokens_ = + new java.util.ArrayList(continuationTokens_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + StreamContinuationToken, + StreamContinuationToken.Builder, + StreamContinuationTokenOrBuilder> + continuationTokensBuilder_; + + /** + * + * + *
+       * If non-empty, contains the information needed to start reading the new
+       * partition(s) that contain segments of this partition's row range.
+       * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + public java.util.List getContinuationTokensList() { + if (continuationTokensBuilder_ == null) { + return java.util.Collections.unmodifiableList(continuationTokens_); + } else { + return continuationTokensBuilder_.getMessageList(); + } + } + /** + * + * + *
+       * If non-empty, contains the information needed to start reading the new
+       * partition(s) that contain segments of this partition's row range.
+       * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + public int getContinuationTokensCount() { + if (continuationTokensBuilder_ == null) { + return continuationTokens_.size(); + } else { + return continuationTokensBuilder_.getCount(); + } + } + /** + * + * + *
+       * If non-empty, contains the information needed to start reading the new
+       * partition(s) that contain segments of this partition's row range.
+       * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + public StreamContinuationToken getContinuationTokens(int index) { + if (continuationTokensBuilder_ == null) { + return continuationTokens_.get(index); + } else { + return continuationTokensBuilder_.getMessage(index); + } + } + /** + * + * + *
+       * If non-empty, contains the information needed to start reading the new
+       * partition(s) that contain segments of this partition's row range.
+       * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + public Builder setContinuationTokens(int index, StreamContinuationToken value) { + if (continuationTokensBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureContinuationTokensIsMutable(); + continuationTokens_.set(index, value); + onChanged(); + } else { + continuationTokensBuilder_.setMessage(index, value); + } + return this; + } + /** + * + * + *
+       * If non-empty, contains the information needed to start reading the new
+       * partition(s) that contain segments of this partition's row range.
+       * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + public Builder setContinuationTokens( + int index, StreamContinuationToken.Builder builderForValue) { + if (continuationTokensBuilder_ == null) { + ensureContinuationTokensIsMutable(); + continuationTokens_.set(index, builderForValue.build()); + onChanged(); + } else { + continuationTokensBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+       * If non-empty, contains the information needed to start reading the new
+       * partition(s) that contain segments of this partition's row range.
+       * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + public Builder addContinuationTokens(StreamContinuationToken value) { + if (continuationTokensBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureContinuationTokensIsMutable(); + continuationTokens_.add(value); + onChanged(); + } else { + continuationTokensBuilder_.addMessage(value); + } + return this; + } + /** + * + * + *
+       * If non-empty, contains the information needed to start reading the new
+       * partition(s) that contain segments of this partition's row range.
+       * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + public Builder addContinuationTokens(int index, StreamContinuationToken value) { + if (continuationTokensBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureContinuationTokensIsMutable(); + continuationTokens_.add(index, value); + onChanged(); + } else { + continuationTokensBuilder_.addMessage(index, value); + } + return this; + } + /** + * + * + *
+       * If non-empty, contains the information needed to start reading the new
+       * partition(s) that contain segments of this partition's row range.
+       * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + public Builder addContinuationTokens(StreamContinuationToken.Builder builderForValue) { + if (continuationTokensBuilder_ == null) { + ensureContinuationTokensIsMutable(); + continuationTokens_.add(builderForValue.build()); + onChanged(); + } else { + continuationTokensBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * + * + *
+       * If non-empty, contains the information needed to start reading the new
+       * partition(s) that contain segments of this partition's row range.
+       * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + public Builder addContinuationTokens( + int index, StreamContinuationToken.Builder builderForValue) { + if (continuationTokensBuilder_ == null) { + ensureContinuationTokensIsMutable(); + continuationTokens_.add(index, builderForValue.build()); + onChanged(); + } else { + continuationTokensBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+       * If non-empty, contains the information needed to start reading the new
+       * partition(s) that contain segments of this partition's row range.
+       * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + public Builder addAllContinuationTokens(Iterable values) { + if (continuationTokensBuilder_ == null) { + ensureContinuationTokensIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, continuationTokens_); + onChanged(); + } else { + continuationTokensBuilder_.addAllMessages(values); + } + return this; + } + /** + * + * + *
+       * If non-empty, contains the information needed to start reading the new
+       * partition(s) that contain segments of this partition's row range.
+       * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + public Builder clearContinuationTokens() { + if (continuationTokensBuilder_ == null) { + continuationTokens_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + continuationTokensBuilder_.clear(); + } + return this; + } + /** + * + * + *
+       * If non-empty, contains the information needed to start reading the new
+       * partition(s) that contain segments of this partition's row range.
+       * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + public Builder removeContinuationTokens(int index) { + if (continuationTokensBuilder_ == null) { + ensureContinuationTokensIsMutable(); + continuationTokens_.remove(index); + onChanged(); + } else { + continuationTokensBuilder_.remove(index); + } + return this; + } + /** + * + * + *
+       * If non-empty, contains the information needed to start reading the new
+       * partition(s) that contain segments of this partition's row range.
+       * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + public StreamContinuationToken.Builder getContinuationTokensBuilder(int index) { + return getContinuationTokensFieldBuilder().getBuilder(index); + } + /** + * + * + *
+       * If non-empty, contains the information needed to start reading the new
+       * partition(s) that contain segments of this partition's row range.
+       * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + public StreamContinuationTokenOrBuilder getContinuationTokensOrBuilder(int index) { + if (continuationTokensBuilder_ == null) { + return continuationTokens_.get(index); + } else { + return continuationTokensBuilder_.getMessageOrBuilder(index); + } + } + /** + * + * + *
+       * If non-empty, contains the information needed to start reading the new
+       * partition(s) that contain segments of this partition's row range.
+       * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + public java.util.List + getContinuationTokensOrBuilderList() { + if (continuationTokensBuilder_ != null) { + return continuationTokensBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(continuationTokens_); + } + } + /** + * + * + *
+       * If non-empty, contains the information needed to start reading the new
+       * partition(s) that contain segments of this partition's row range.
+       * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + public StreamContinuationToken.Builder addContinuationTokensBuilder() { + return getContinuationTokensFieldBuilder() + .addBuilder(StreamContinuationToken.getDefaultInstance()); + } + /** + * + * + *
+       * If non-empty, contains the information needed to start reading the new
+       * partition(s) that contain segments of this partition's row range.
+       * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + public StreamContinuationToken.Builder addContinuationTokensBuilder(int index) { + return getContinuationTokensFieldBuilder() + .addBuilder(index, StreamContinuationToken.getDefaultInstance()); + } + /** + * + * + *
+       * If non-empty, contains the information needed to start reading the new
+       * partition(s) that contain segments of this partition's row range.
+       * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; + */ + public java.util.List getContinuationTokensBuilderList() { + return getContinuationTokensFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + StreamContinuationToken, + StreamContinuationToken.Builder, + StreamContinuationTokenOrBuilder> + getContinuationTokensFieldBuilder() { + if (continuationTokensBuilder_ == null) { + continuationTokensBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + StreamContinuationToken, + StreamContinuationToken.Builder, + StreamContinuationTokenOrBuilder>( + continuationTokens_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + continuationTokens_ = null; + } + return continuationTokensBuilder_; + } + + @Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.bigtable.v2.ReadChangeStreamResponse.CloseStream) + } + + // @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadChangeStreamResponse.CloseStream) + private static final CloseStream DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new CloseStream(); + } + + public static CloseStream getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public CloseStream parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new CloseStream(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + 
return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public CloseStream getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int streamRecordCase_ = 0; + private Object streamRecord_; + + public enum StreamRecordCase implements com.google.protobuf.Internal.EnumLite, InternalOneOfEnum { + DATA_CHANGE(1), + HEARTBEAT(2), + CLOSE_STREAM(3), + STREAMRECORD_NOT_SET(0); + private final int value; + + private StreamRecordCase(int value) { + this.value = value; + } + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @Deprecated + public static StreamRecordCase valueOf(int value) { + return forNumber(value); + } + + public static StreamRecordCase forNumber(int value) { + switch (value) { + case 1: + return DATA_CHANGE; + case 2: + return HEARTBEAT; + case 3: + return CLOSE_STREAM; + case 0: + return STREAMRECORD_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public StreamRecordCase getStreamRecordCase() { + return StreamRecordCase.forNumber(streamRecordCase_); + } + + public static final int DATA_CHANGE_FIELD_NUMBER = 1; + /** + * + * + *
+   * A mutation to the partition.
+   * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; + * + * @return Whether the dataChange field is set. + */ + @Override + public boolean hasDataChange() { + return streamRecordCase_ == 1; + } + /** + * + * + *
+   * A mutation to the partition.
+   * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; + * + * @return The dataChange. + */ + @Override + public DataChange getDataChange() { + if (streamRecordCase_ == 1) { + return (DataChange) streamRecord_; + } + return DataChange.getDefaultInstance(); + } + /** + * + * + *
+   * A mutation to the partition.
+   * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; + */ + @Override + public DataChangeOrBuilder getDataChangeOrBuilder() { + if (streamRecordCase_ == 1) { + return (DataChange) streamRecord_; + } + return DataChange.getDefaultInstance(); + } + + public static final int HEARTBEAT_FIELD_NUMBER = 2; + /** + * + * + *
+   * A periodic heartbeat message.
+   * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; + * + * @return Whether the heartbeat field is set. + */ + @Override + public boolean hasHeartbeat() { + return streamRecordCase_ == 2; + } + /** + * + * + *
+   * A periodic heartbeat message.
+   * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; + * + * @return The heartbeat. + */ + @Override + public Heartbeat getHeartbeat() { + if (streamRecordCase_ == 2) { + return (Heartbeat) streamRecord_; + } + return Heartbeat.getDefaultInstance(); + } + /** + * + * + *
+   * A periodic heartbeat message.
+   * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; + */ + @Override + public HeartbeatOrBuilder getHeartbeatOrBuilder() { + if (streamRecordCase_ == 2) { + return (Heartbeat) streamRecord_; + } + return Heartbeat.getDefaultInstance(); + } + + public static final int CLOSE_STREAM_FIELD_NUMBER = 3; + /** + * + * + *
+   * An indication that the stream should be closed.
+   * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; + * + * @return Whether the closeStream field is set. + */ + @Override + public boolean hasCloseStream() { + return streamRecordCase_ == 3; + } + /** + * + * + *
+   * An indication that the stream should be closed.
+   * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; + * + * @return The closeStream. + */ + @Override + public CloseStream getCloseStream() { + if (streamRecordCase_ == 3) { + return (CloseStream) streamRecord_; + } + return CloseStream.getDefaultInstance(); + } + /** + * + * + *
+   * An indication that the stream should be closed.
+   * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; + */ + @Override + public CloseStreamOrBuilder getCloseStreamOrBuilder() { + if (streamRecordCase_ == 3) { + return (CloseStream) streamRecord_; + } + return CloseStream.getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (streamRecordCase_ == 1) { + output.writeMessage(1, (DataChange) streamRecord_); + } + if (streamRecordCase_ == 2) { + output.writeMessage(2, (Heartbeat) streamRecord_); + } + if (streamRecordCase_ == 3) { + output.writeMessage(3, (CloseStream) streamRecord_); + } + unknownFields.writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (streamRecordCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(1, (DataChange) streamRecord_); + } + if (streamRecordCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(2, (Heartbeat) streamRecord_); + } + if (streamRecordCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(3, (CloseStream) streamRecord_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof ReadChangeStreamResponse)) { + return super.equals(obj); + } + ReadChangeStreamResponse other = (ReadChangeStreamResponse) obj; + + if (!getStreamRecordCase().equals(other.getStreamRecordCase())) return false; + switch (streamRecordCase_) { + case 1: + if 
(!getDataChange().equals(other.getDataChange())) return false; + break; + case 2: + if (!getHeartbeat().equals(other.getHeartbeat())) return false; + break; + case 3: + if (!getCloseStream().equals(other.getCloseStream())) return false; + break; + case 0: + default: + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (streamRecordCase_) { + case 1: + hash = (37 * hash) + DATA_CHANGE_FIELD_NUMBER; + hash = (53 * hash) + getDataChange().hashCode(); + break; + case 2: + hash = (37 * hash) + HEARTBEAT_FIELD_NUMBER; + hash = (53 * hash) + getHeartbeat().hashCode(); + break; + case 3: + hash = (37 * hash) + CLOSE_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getCloseStream().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static ReadChangeStreamResponse parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static ReadChangeStreamResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static ReadChangeStreamResponse parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static ReadChangeStreamResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static ReadChangeStreamResponse parseFrom(byte[] data) 
+ throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static ReadChangeStreamResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static ReadChangeStreamResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static ReadChangeStreamResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static ReadChangeStreamResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static ReadChangeStreamResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static ReadChangeStreamResponse parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static ReadChangeStreamResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + 
return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(ReadChangeStreamResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * NOTE: This API is not generally available. Users must be allowlisted.
+   * Response message for Bigtable.ReadChangeStream.
+   * 
+ * + * Protobuf type {@code google.bigtable.v2.ReadChangeStreamResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.bigtable.v2.ReadChangeStreamResponse) + ReadChangeStreamResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return BigtableProto.internal_static_google_bigtable_v2_ReadChangeStreamResponse_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized(ReadChangeStreamResponse.class, Builder.class); + } + + // Construct using com.google.bigtable.v2.ReadChangeStreamResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @Override + public Builder clear() { + super.clear(); + streamRecordCase_ = 0; + streamRecord_ = null; + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return BigtableProto.internal_static_google_bigtable_v2_ReadChangeStreamResponse_descriptor; + } + + @Override + public ReadChangeStreamResponse getDefaultInstanceForType() { + return ReadChangeStreamResponse.getDefaultInstance(); + } + + @Override + public ReadChangeStreamResponse build() { + ReadChangeStreamResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public ReadChangeStreamResponse buildPartial() { + ReadChangeStreamResponse result = new ReadChangeStreamResponse(this); + if (streamRecordCase_ == 1) { + if 
(dataChangeBuilder_ == null) { + result.streamRecord_ = streamRecord_; + } else { + result.streamRecord_ = dataChangeBuilder_.build(); + } + } + if (streamRecordCase_ == 2) { + if (heartbeatBuilder_ == null) { + result.streamRecord_ = streamRecord_; + } else { + result.streamRecord_ = heartbeatBuilder_.build(); + } + } + if (streamRecordCase_ == 3) { + if (closeStreamBuilder_ == null) { + result.streamRecord_ = streamRecord_; + } else { + result.streamRecord_ = closeStreamBuilder_.build(); + } + } + result.streamRecordCase_ = streamRecordCase_; + onBuilt(); + return result; + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof ReadChangeStreamResponse) { + return mergeFrom((ReadChangeStreamResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(ReadChangeStreamResponse other) { + if (other == ReadChangeStreamResponse.getDefaultInstance()) return this; + switch (other.getStreamRecordCase()) { + case DATA_CHANGE: + { + mergeDataChange(other.getDataChange()); + break; + } + case HEARTBEAT: + { + 
mergeHeartbeat(other.getHeartbeat()); + break; + } + case CLOSE_STREAM: + { + mergeCloseStream(other.getCloseStream()); + break; + } + case STREAMRECORD_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + ReadChangeStreamResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (ReadChangeStreamResponse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int streamRecordCase_ = 0; + private Object streamRecord_; + + public StreamRecordCase getStreamRecordCase() { + return StreamRecordCase.forNumber(streamRecordCase_); + } + + public Builder clearStreamRecord() { + streamRecordCase_ = 0; + streamRecord_ = null; + onChanged(); + return this; + } + + private com.google.protobuf.SingleFieldBuilderV3< + DataChange, DataChange.Builder, DataChangeOrBuilder> + dataChangeBuilder_; + /** + * + * + *
+     * A mutation to the partition.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; + * + * @return Whether the dataChange field is set. + */ + @Override + public boolean hasDataChange() { + return streamRecordCase_ == 1; + } + /** + * + * + *
+     * A mutation to the partition.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; + * + * @return The dataChange. + */ + @Override + public DataChange getDataChange() { + if (dataChangeBuilder_ == null) { + if (streamRecordCase_ == 1) { + return (DataChange) streamRecord_; + } + return DataChange.getDefaultInstance(); + } else { + if (streamRecordCase_ == 1) { + return dataChangeBuilder_.getMessage(); + } + return DataChange.getDefaultInstance(); + } + } + /** + * + * + *
+     * A mutation to the partition.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; + */ + public Builder setDataChange(DataChange value) { + if (dataChangeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + streamRecord_ = value; + onChanged(); + } else { + dataChangeBuilder_.setMessage(value); + } + streamRecordCase_ = 1; + return this; + } + /** + * + * + *
+     * A mutation to the partition.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; + */ + public Builder setDataChange(DataChange.Builder builderForValue) { + if (dataChangeBuilder_ == null) { + streamRecord_ = builderForValue.build(); + onChanged(); + } else { + dataChangeBuilder_.setMessage(builderForValue.build()); + } + streamRecordCase_ = 1; + return this; + } + /** + * + * + *
+     * A mutation to the partition.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; + */ + public Builder mergeDataChange(DataChange value) { + if (dataChangeBuilder_ == null) { + if (streamRecordCase_ == 1 && streamRecord_ != DataChange.getDefaultInstance()) { + streamRecord_ = + DataChange.newBuilder((DataChange) streamRecord_).mergeFrom(value).buildPartial(); + } else { + streamRecord_ = value; + } + onChanged(); + } else { + if (streamRecordCase_ == 1) { + dataChangeBuilder_.mergeFrom(value); + } else { + dataChangeBuilder_.setMessage(value); + } + } + streamRecordCase_ = 1; + return this; + } + /** + * + * + *
+     * A mutation to the partition.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; + */ + public Builder clearDataChange() { + if (dataChangeBuilder_ == null) { + if (streamRecordCase_ == 1) { + streamRecordCase_ = 0; + streamRecord_ = null; + onChanged(); + } + } else { + if (streamRecordCase_ == 1) { + streamRecordCase_ = 0; + streamRecord_ = null; + } + dataChangeBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * A mutation to the partition.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; + */ + public DataChange.Builder getDataChangeBuilder() { + return getDataChangeFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * A mutation to the partition.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; + */ + @Override + public DataChangeOrBuilder getDataChangeOrBuilder() { + if ((streamRecordCase_ == 1) && (dataChangeBuilder_ != null)) { + return dataChangeBuilder_.getMessageOrBuilder(); + } else { + if (streamRecordCase_ == 1) { + return (DataChange) streamRecord_; + } + return DataChange.getDefaultInstance(); + } + } + /** + * + * + *
+     * A mutation to the partition.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + DataChange, DataChange.Builder, DataChangeOrBuilder> + getDataChangeFieldBuilder() { + if (dataChangeBuilder_ == null) { + if (!(streamRecordCase_ == 1)) { + streamRecord_ = DataChange.getDefaultInstance(); + } + dataChangeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + DataChange, DataChange.Builder, DataChangeOrBuilder>( + (DataChange) streamRecord_, getParentForChildren(), isClean()); + streamRecord_ = null; + } + streamRecordCase_ = 1; + onChanged(); + ; + return dataChangeBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + Heartbeat, Heartbeat.Builder, HeartbeatOrBuilder> + heartbeatBuilder_; + /** + * + * + *
+     * A periodic heartbeat message.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; + * + * @return Whether the heartbeat field is set. + */ + @Override + public boolean hasHeartbeat() { + return streamRecordCase_ == 2; + } + /** + * + * + *
+     * A periodic heartbeat message.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; + * + * @return The heartbeat. + */ + @Override + public Heartbeat getHeartbeat() { + if (heartbeatBuilder_ == null) { + if (streamRecordCase_ == 2) { + return (Heartbeat) streamRecord_; + } + return Heartbeat.getDefaultInstance(); + } else { + if (streamRecordCase_ == 2) { + return heartbeatBuilder_.getMessage(); + } + return Heartbeat.getDefaultInstance(); + } + } + /** + * + * + *
+     * A periodic heartbeat message.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; + */ + public Builder setHeartbeat(Heartbeat value) { + if (heartbeatBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + streamRecord_ = value; + onChanged(); + } else { + heartbeatBuilder_.setMessage(value); + } + streamRecordCase_ = 2; + return this; + } + /** + * + * + *
+     * A periodic heartbeat message.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; + */ + public Builder setHeartbeat(Heartbeat.Builder builderForValue) { + if (heartbeatBuilder_ == null) { + streamRecord_ = builderForValue.build(); + onChanged(); + } else { + heartbeatBuilder_.setMessage(builderForValue.build()); + } + streamRecordCase_ = 2; + return this; + } + /** + * + * + *
+     * A periodic heartbeat message.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; + */ + public Builder mergeHeartbeat(Heartbeat value) { + if (heartbeatBuilder_ == null) { + if (streamRecordCase_ == 2 && streamRecord_ != Heartbeat.getDefaultInstance()) { + streamRecord_ = + Heartbeat.newBuilder((Heartbeat) streamRecord_).mergeFrom(value).buildPartial(); + } else { + streamRecord_ = value; + } + onChanged(); + } else { + if (streamRecordCase_ == 2) { + heartbeatBuilder_.mergeFrom(value); + } else { + heartbeatBuilder_.setMessage(value); + } + } + streamRecordCase_ = 2; + return this; + } + /** + * + * + *
+     * A periodic heartbeat message.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; + */ + public Builder clearHeartbeat() { + if (heartbeatBuilder_ == null) { + if (streamRecordCase_ == 2) { + streamRecordCase_ = 0; + streamRecord_ = null; + onChanged(); + } + } else { + if (streamRecordCase_ == 2) { + streamRecordCase_ = 0; + streamRecord_ = null; + } + heartbeatBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * A periodic heartbeat message.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; + */ + public Heartbeat.Builder getHeartbeatBuilder() { + return getHeartbeatFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * A periodic heartbeat message.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; + */ + @Override + public HeartbeatOrBuilder getHeartbeatOrBuilder() { + if ((streamRecordCase_ == 2) && (heartbeatBuilder_ != null)) { + return heartbeatBuilder_.getMessageOrBuilder(); + } else { + if (streamRecordCase_ == 2) { + return (Heartbeat) streamRecord_; + } + return Heartbeat.getDefaultInstance(); + } + } + /** + * + * + *
+     * A periodic heartbeat message.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + Heartbeat, Heartbeat.Builder, HeartbeatOrBuilder> + getHeartbeatFieldBuilder() { + if (heartbeatBuilder_ == null) { + if (!(streamRecordCase_ == 2)) { + streamRecord_ = Heartbeat.getDefaultInstance(); + } + heartbeatBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + Heartbeat, Heartbeat.Builder, HeartbeatOrBuilder>( + (Heartbeat) streamRecord_, getParentForChildren(), isClean()); + streamRecord_ = null; + } + streamRecordCase_ = 2; + onChanged(); + ; + return heartbeatBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + CloseStream, CloseStream.Builder, CloseStreamOrBuilder> + closeStreamBuilder_; + /** + * + * + *
+     * An indication that the stream should be closed.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; + * + * @return Whether the closeStream field is set. + */ + @Override + public boolean hasCloseStream() { + return streamRecordCase_ == 3; + } + /** + * + * + *
+     * An indication that the stream should be closed.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; + * + * @return The closeStream. + */ + @Override + public CloseStream getCloseStream() { + if (closeStreamBuilder_ == null) { + if (streamRecordCase_ == 3) { + return (CloseStream) streamRecord_; + } + return CloseStream.getDefaultInstance(); + } else { + if (streamRecordCase_ == 3) { + return closeStreamBuilder_.getMessage(); + } + return CloseStream.getDefaultInstance(); + } + } + /** + * + * + *
+     * An indication that the stream should be closed.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; + */ + public Builder setCloseStream(CloseStream value) { + if (closeStreamBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + streamRecord_ = value; + onChanged(); + } else { + closeStreamBuilder_.setMessage(value); + } + streamRecordCase_ = 3; + return this; + } + /** + * + * + *
+     * An indication that the stream should be closed.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; + */ + public Builder setCloseStream(CloseStream.Builder builderForValue) { + if (closeStreamBuilder_ == null) { + streamRecord_ = builderForValue.build(); + onChanged(); + } else { + closeStreamBuilder_.setMessage(builderForValue.build()); + } + streamRecordCase_ = 3; + return this; + } + /** + * + * + *
+     * An indication that the stream should be closed.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; + */ + public Builder mergeCloseStream(CloseStream value) { + if (closeStreamBuilder_ == null) { + if (streamRecordCase_ == 3 && streamRecord_ != CloseStream.getDefaultInstance()) { + streamRecord_ = + CloseStream.newBuilder((CloseStream) streamRecord_).mergeFrom(value).buildPartial(); + } else { + streamRecord_ = value; + } + onChanged(); + } else { + if (streamRecordCase_ == 3) { + closeStreamBuilder_.mergeFrom(value); + } else { + closeStreamBuilder_.setMessage(value); + } + } + streamRecordCase_ = 3; + return this; + } + /** + * + * + *
+     * An indication that the stream should be closed.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; + */ + public Builder clearCloseStream() { + if (closeStreamBuilder_ == null) { + if (streamRecordCase_ == 3) { + streamRecordCase_ = 0; + streamRecord_ = null; + onChanged(); + } + } else { + if (streamRecordCase_ == 3) { + streamRecordCase_ = 0; + streamRecord_ = null; + } + closeStreamBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * An indication that the stream should be closed.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; + */ + public CloseStream.Builder getCloseStreamBuilder() { + return getCloseStreamFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * An indication that the stream should be closed.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; + */ + @Override + public CloseStreamOrBuilder getCloseStreamOrBuilder() { + if ((streamRecordCase_ == 3) && (closeStreamBuilder_ != null)) { + return closeStreamBuilder_.getMessageOrBuilder(); + } else { + if (streamRecordCase_ == 3) { + return (CloseStream) streamRecord_; + } + return CloseStream.getDefaultInstance(); + } + } + /** + * + * + *
+     * An indication that the stream should be closed.
+     * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + CloseStream, CloseStream.Builder, CloseStreamOrBuilder> + getCloseStreamFieldBuilder() { + if (closeStreamBuilder_ == null) { + if (!(streamRecordCase_ == 3)) { + streamRecord_ = CloseStream.getDefaultInstance(); + } + closeStreamBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + CloseStream, CloseStream.Builder, CloseStreamOrBuilder>( + (CloseStream) streamRecord_, getParentForChildren(), isClean()); + streamRecord_ = null; + } + streamRecordCase_ = 3; + onChanged(); + ; + return closeStreamBuilder_; + } + + @Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.bigtable.v2.ReadChangeStreamResponse) + } + + // @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadChangeStreamResponse) + private static final ReadChangeStreamResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new ReadChangeStreamResponse(); + } + + public static ReadChangeStreamResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public ReadChangeStreamResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ReadChangeStreamResponse(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + 
} + + @Override + public ReadChangeStreamResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamResponseOrBuilder.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamResponseOrBuilder.java new file mode 100644 index 0000000000..96f0b11d26 --- /dev/null +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamResponseOrBuilder.java @@ -0,0 +1,132 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/bigtable/v2/bigtable.proto + +package com.google.bigtable.v2; + +public interface ReadChangeStreamResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.bigtable.v2.ReadChangeStreamResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * A mutation to the partition.
+   * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; + * + * @return Whether the dataChange field is set. + */ + boolean hasDataChange(); + /** + * + * + *
+   * A mutation to the partition.
+   * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; + * + * @return The dataChange. + */ + ReadChangeStreamResponse.DataChange getDataChange(); + /** + * + * + *
+   * A mutation to the partition.
+   * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; + */ + ReadChangeStreamResponse.DataChangeOrBuilder getDataChangeOrBuilder(); + + /** + * + * + *
+   * A periodic heartbeat message.
+   * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; + * + * @return Whether the heartbeat field is set. + */ + boolean hasHeartbeat(); + /** + * + * + *
+   * A periodic heartbeat message.
+   * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; + * + * @return The heartbeat. + */ + ReadChangeStreamResponse.Heartbeat getHeartbeat(); + /** + * + * + *
+   * A periodic heartbeat message.
+   * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; + */ + ReadChangeStreamResponse.HeartbeatOrBuilder getHeartbeatOrBuilder(); + + /** + * + * + *
+   * An indication that the stream should be closed.
+   * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; + * + * @return Whether the closeStream field is set. + */ + boolean hasCloseStream(); + /** + * + * + *
+   * An indication that the stream should be closed.
+   * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; + * + * @return The closeStream. + */ + ReadChangeStreamResponse.CloseStream getCloseStream(); + /** + * + * + *
+   * An indication that the stream should be closed.
+   * 
+ * + * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; + */ + ReadChangeStreamResponse.CloseStreamOrBuilder getCloseStreamOrBuilder(); + + public ReadChangeStreamResponse.StreamRecordCase getStreamRecordCase(); +} diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamContinuationToken.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamContinuationToken.java new file mode 100644 index 0000000000..5ecb0facf6 --- /dev/null +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamContinuationToken.java @@ -0,0 +1,884 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/bigtable/v2/data.proto + +package com.google.bigtable.v2; + +/** + * + * + *
+ * NOTE: This API is not generally available. Users must be allowlisted.
+ * The information required to continue reading the data from a
+ * `StreamPartition` from where a previous read left off.
+ * 
+ * + * Protobuf type {@code google.bigtable.v2.StreamContinuationToken} + */ +public final class StreamContinuationToken extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.bigtable.v2.StreamContinuationToken) + StreamContinuationTokenOrBuilder { + private static final long serialVersionUID = 0L; + // Use StreamContinuationToken.newBuilder() to construct. + private StreamContinuationToken(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StreamContinuationToken() { + token_ = ""; + } + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new StreamContinuationToken(); + } + + @Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private StreamContinuationToken( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + StreamPartition.Builder subBuilder = null; + if (partition_ != null) { + subBuilder = partition_.toBuilder(); + } + partition_ = input.readMessage(StreamPartition.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(partition_); + partition_ = subBuilder.buildPartial(); + } + + break; + } + case 18: + { + String s = input.readStringRequireUtf8(); + + token_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return DataProto.internal_static_google_bigtable_v2_StreamContinuationToken_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return DataProto.internal_static_google_bigtable_v2_StreamContinuationToken_fieldAccessorTable + .ensureFieldAccessorsInitialized(StreamContinuationToken.class, Builder.class); + } + + public static final int PARTITION_FIELD_NUMBER = 1; + private StreamPartition partition_; + /** + * + * + *
+   * The partition that this token applies to.
+   * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + * + * @return Whether the partition field is set. + */ + @Override + public boolean hasPartition() { + return partition_ != null; + } + /** + * + * + *
+   * The partition that this token applies to.
+   * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + * + * @return The partition. + */ + @Override + public StreamPartition getPartition() { + return partition_ == null ? StreamPartition.getDefaultInstance() : partition_; + } + /** + * + * + *
+   * The partition that this token applies to.
+   * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + */ + @Override + public StreamPartitionOrBuilder getPartitionOrBuilder() { + return getPartition(); + } + + public static final int TOKEN_FIELD_NUMBER = 2; + private volatile Object token_; + /** + * + * + *
+   * An encoded position in the stream to restart reading from.
+   * 
+ * + * string token = 2; + * + * @return The token. + */ + @Override + public String getToken() { + Object ref = token_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + token_ = s; + return s; + } + } + /** + * + * + *
+   * An encoded position in the stream to restart reading from.
+   * 
+ * + * string token = 2; + * + * @return The bytes for token. + */ + @Override + public com.google.protobuf.ByteString getTokenBytes() { + Object ref = token_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); + token_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (partition_ != null) { + output.writeMessage(1, getPartition()); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(token_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, token_); + } + unknownFields.writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (partition_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getPartition()); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(token_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, token_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof StreamContinuationToken)) { + return super.equals(obj); + } + StreamContinuationToken other = (StreamContinuationToken) obj; + + if (hasPartition() != other.hasPartition()) return false; + if (hasPartition()) { + if (!getPartition().equals(other.getPartition())) return false; + } + if (!getToken().equals(other.getToken())) return false; + if 
(!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasPartition()) { + hash = (37 * hash) + PARTITION_FIELD_NUMBER; + hash = (53 * hash) + getPartition().hashCode(); + } + hash = (37 * hash) + TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getToken().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static StreamContinuationToken parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static StreamContinuationToken parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static StreamContinuationToken parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static StreamContinuationToken parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static StreamContinuationToken parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static StreamContinuationToken parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static StreamContinuationToken parseFrom(java.io.InputStream input) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static StreamContinuationToken parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static StreamContinuationToken parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static StreamContinuationToken parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static StreamContinuationToken parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static StreamContinuationToken parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(StreamContinuationToken prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * NOTE: This API is not generally available. Users must be allowlisted.
+   * The information required to continue reading the data from a
+   * `StreamPartition` from where a previous read left off.
+   * 
+ * + * Protobuf type {@code google.bigtable.v2.StreamContinuationToken} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.bigtable.v2.StreamContinuationToken) + StreamContinuationTokenOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return DataProto.internal_static_google_bigtable_v2_StreamContinuationToken_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return DataProto.internal_static_google_bigtable_v2_StreamContinuationToken_fieldAccessorTable + .ensureFieldAccessorsInitialized(StreamContinuationToken.class, Builder.class); + } + + // Construct using com.google.bigtable.v2.StreamContinuationToken.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @Override + public Builder clear() { + super.clear(); + if (partitionBuilder_ == null) { + partition_ = null; + } else { + partition_ = null; + partitionBuilder_ = null; + } + token_ = ""; + + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return DataProto.internal_static_google_bigtable_v2_StreamContinuationToken_descriptor; + } + + @Override + public StreamContinuationToken getDefaultInstanceForType() { + return StreamContinuationToken.getDefaultInstance(); + } + + @Override + public StreamContinuationToken build() { + StreamContinuationToken result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public StreamContinuationToken buildPartial() { + StreamContinuationToken result = new 
StreamContinuationToken(this); + if (partitionBuilder_ == null) { + result.partition_ = partition_; + } else { + result.partition_ = partitionBuilder_.build(); + } + result.token_ = token_; + onBuilt(); + return result; + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof StreamContinuationToken) { + return mergeFrom((StreamContinuationToken) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(StreamContinuationToken other) { + if (other == StreamContinuationToken.getDefaultInstance()) return this; + if (other.hasPartition()) { + mergePartition(other.getPartition()); + } + if (!other.getToken().isEmpty()) { + token_ = other.token_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + StreamContinuationToken parsedMessage = 
null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (StreamContinuationToken) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private StreamPartition partition_; + private com.google.protobuf.SingleFieldBuilderV3< + StreamPartition, StreamPartition.Builder, StreamPartitionOrBuilder> + partitionBuilder_; + /** + * + * + *
+     * The partition that this token applies to.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + * + * @return Whether the partition field is set. + */ + public boolean hasPartition() { + return partitionBuilder_ != null || partition_ != null; + } + /** + * + * + *
+     * The partition that this token applies to.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + * + * @return The partition. + */ + public StreamPartition getPartition() { + if (partitionBuilder_ == null) { + return partition_ == null ? StreamPartition.getDefaultInstance() : partition_; + } else { + return partitionBuilder_.getMessage(); + } + } + /** + * + * + *
+     * The partition that this token applies to.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + */ + public Builder setPartition(StreamPartition value) { + if (partitionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + partition_ = value; + onChanged(); + } else { + partitionBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * The partition that this token applies to.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + */ + public Builder setPartition(StreamPartition.Builder builderForValue) { + if (partitionBuilder_ == null) { + partition_ = builderForValue.build(); + onChanged(); + } else { + partitionBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * The partition that this token applies to.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + */ + public Builder mergePartition(StreamPartition value) { + if (partitionBuilder_ == null) { + if (partition_ != null) { + partition_ = StreamPartition.newBuilder(partition_).mergeFrom(value).buildPartial(); + } else { + partition_ = value; + } + onChanged(); + } else { + partitionBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * The partition that this token applies to.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + */ + public Builder clearPartition() { + if (partitionBuilder_ == null) { + partition_ = null; + onChanged(); + } else { + partition_ = null; + partitionBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * The partition that this token applies to.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + */ + public StreamPartition.Builder getPartitionBuilder() { + + onChanged(); + return getPartitionFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * The partition that this token applies to.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + */ + public StreamPartitionOrBuilder getPartitionOrBuilder() { + if (partitionBuilder_ != null) { + return partitionBuilder_.getMessageOrBuilder(); + } else { + return partition_ == null ? StreamPartition.getDefaultInstance() : partition_; + } + } + /** + * + * + *
+     * The partition that this token applies to.
+     * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + StreamPartition, StreamPartition.Builder, StreamPartitionOrBuilder> + getPartitionFieldBuilder() { + if (partitionBuilder_ == null) { + partitionBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + StreamPartition, StreamPartition.Builder, StreamPartitionOrBuilder>( + getPartition(), getParentForChildren(), isClean()); + partition_ = null; + } + return partitionBuilder_; + } + + private Object token_ = ""; + /** + * + * + *
+     * An encoded position in the stream to restart reading from.
+     * 
+ * + * string token = 2; + * + * @return The token. + */ + public String getToken() { + Object ref = token_; + if (!(ref instanceof String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + token_ = s; + return s; + } else { + return (String) ref; + } + } + /** + * + * + *
+     * An encoded position in the stream to restart reading from.
+     * 
+ * + * string token = 2; + * + * @return The bytes for token. + */ + public com.google.protobuf.ByteString getTokenBytes() { + Object ref = token_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + token_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * An encoded position in the stream to restart reading from.
+     * 
+ * + * string token = 2; + * + * @param value The token to set. + * @return This builder for chaining. + */ + public Builder setToken(String value) { + if (value == null) { + throw new NullPointerException(); + } + + token_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * An encoded position in the stream to restart reading from.
+     * 
+ * + * string token = 2; + * + * @return This builder for chaining. + */ + public Builder clearToken() { + + token_ = getDefaultInstance().getToken(); + onChanged(); + return this; + } + /** + * + * + *
+     * An encoded position in the stream to restart reading from.
+     * 
+ * + * string token = 2; + * + * @param value The bytes for token to set. + * @return This builder for chaining. + */ + public Builder setTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + token_ = value; + onChanged(); + return this; + } + + @Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.bigtable.v2.StreamContinuationToken) + } + + // @@protoc_insertion_point(class_scope:google.bigtable.v2.StreamContinuationToken) + private static final StreamContinuationToken DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new StreamContinuationToken(); + } + + public static StreamContinuationToken getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public StreamContinuationToken parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new StreamContinuationToken(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public StreamContinuationToken getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamContinuationTokenOrBuilder.java 
b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamContinuationTokenOrBuilder.java new file mode 100644 index 0000000000..4bb2598892 --- /dev/null +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamContinuationTokenOrBuilder.java @@ -0,0 +1,85 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/bigtable/v2/data.proto + +package com.google.bigtable.v2; + +public interface StreamContinuationTokenOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.bigtable.v2.StreamContinuationToken) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The partition that this token applies to.
+   * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + * + * @return Whether the partition field is set. + */ + boolean hasPartition(); + /** + * + * + *
+   * The partition that this token applies to.
+   * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + * + * @return The partition. + */ + StreamPartition getPartition(); + /** + * + * + *
+   * The partition that this token applies to.
+   * 
+ * + * .google.bigtable.v2.StreamPartition partition = 1; + */ + StreamPartitionOrBuilder getPartitionOrBuilder(); + + /** + * + * + *
+   * An encoded position in the stream to restart reading from.
+   * 
+ * + * string token = 2; + * + * @return The token. + */ + String getToken(); + /** + * + * + *
+   * An encoded position in the stream to restart reading from.
+   * 
+ * + * string token = 2; + * + * @return The bytes for token. + */ + com.google.protobuf.ByteString getTokenBytes(); +} diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamContinuationTokens.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamContinuationTokens.java new file mode 100644 index 0000000000..3161bf7759 --- /dev/null +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamContinuationTokens.java @@ -0,0 +1,929 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/bigtable/v2/data.proto + +package com.google.bigtable.v2; + +/** + * + * + *
+ * NOTE: This API is not generally available. Users must be allowlisted.
+ * The information required to continue reading the data from multiple
+ * `StreamPartitions` from where a previous read left off.
+ * 
+ * + * Protobuf type {@code google.bigtable.v2.StreamContinuationTokens} + */ +public final class StreamContinuationTokens extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.bigtable.v2.StreamContinuationTokens) + StreamContinuationTokensOrBuilder { + private static final long serialVersionUID = 0L; + // Use StreamContinuationTokens.newBuilder() to construct. + private StreamContinuationTokens(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StreamContinuationTokens() { + tokens_ = java.util.Collections.emptyList(); + } + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new StreamContinuationTokens(); + } + + @Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private StreamContinuationTokens( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + tokens_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + tokens_.add(input.readMessage(StreamContinuationToken.parser(), extensionRegistry)); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch 
(com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + tokens_ = java.util.Collections.unmodifiableList(tokens_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return DataProto.internal_static_google_bigtable_v2_StreamContinuationTokens_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return DataProto.internal_static_google_bigtable_v2_StreamContinuationTokens_fieldAccessorTable + .ensureFieldAccessorsInitialized(StreamContinuationTokens.class, Builder.class); + } + + public static final int TOKENS_FIELD_NUMBER = 1; + private java.util.List tokens_; + /** + * + * + *
+   * List of continuation tokens.
+   * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + @Override + public java.util.List getTokensList() { + return tokens_; + } + /** + * + * + *
+   * List of continuation tokens.
+   * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + @Override + public java.util.List getTokensOrBuilderList() { + return tokens_; + } + /** + * + * + *
+   * List of continuation tokens.
+   * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + @Override + public int getTokensCount() { + return tokens_.size(); + } + /** + * + * + *
+   * List of continuation tokens.
+   * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + @Override + public StreamContinuationToken getTokens(int index) { + return tokens_.get(index); + } + /** + * + * + *
+   * List of continuation tokens.
+   * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + @Override + public StreamContinuationTokenOrBuilder getTokensOrBuilder(int index) { + return tokens_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < tokens_.size(); i++) { + output.writeMessage(1, tokens_.get(i)); + } + unknownFields.writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < tokens_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, tokens_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof StreamContinuationTokens)) { + return super.equals(obj); + } + StreamContinuationTokens other = (StreamContinuationTokens) obj; + + if (!getTokensList().equals(other.getTokensList())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getTokensCount() > 0) { + hash = (37 * hash) + TOKENS_FIELD_NUMBER; + hash = (53 * hash) + getTokensList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static StreamContinuationTokens parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data); + } + + public static StreamContinuationTokens parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static StreamContinuationTokens parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static StreamContinuationTokens parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static StreamContinuationTokens parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static StreamContinuationTokens parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static StreamContinuationTokens parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static StreamContinuationTokens parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static StreamContinuationTokens parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static StreamContinuationTokens parseDelimitedFrom( + java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static StreamContinuationTokens parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static StreamContinuationTokens parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(StreamContinuationTokens prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * NOTE: This API is not generally available. Users must be allowlisted.
+   * The information required to continue reading the data from multiple
+   * `StreamPartitions` from where a previous read left off.
+   * 
+ * + * Protobuf type {@code google.bigtable.v2.StreamContinuationTokens} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.bigtable.v2.StreamContinuationTokens) + StreamContinuationTokensOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return DataProto.internal_static_google_bigtable_v2_StreamContinuationTokens_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return DataProto + .internal_static_google_bigtable_v2_StreamContinuationTokens_fieldAccessorTable + .ensureFieldAccessorsInitialized(StreamContinuationTokens.class, Builder.class); + } + + // Construct using com.google.bigtable.v2.StreamContinuationTokens.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getTokensFieldBuilder(); + } + } + + @Override + public Builder clear() { + super.clear(); + if (tokensBuilder_ == null) { + tokens_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + tokensBuilder_.clear(); + } + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return DataProto.internal_static_google_bigtable_v2_StreamContinuationTokens_descriptor; + } + + @Override + public StreamContinuationTokens getDefaultInstanceForType() { + return StreamContinuationTokens.getDefaultInstance(); + } + + @Override + public StreamContinuationTokens build() { + StreamContinuationTokens result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public StreamContinuationTokens 
buildPartial() { + StreamContinuationTokens result = new StreamContinuationTokens(this); + int from_bitField0_ = bitField0_; + if (tokensBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + tokens_ = java.util.Collections.unmodifiableList(tokens_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.tokens_ = tokens_; + } else { + result.tokens_ = tokensBuilder_.build(); + } + onBuilt(); + return result; + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof StreamContinuationTokens) { + return mergeFrom((StreamContinuationTokens) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(StreamContinuationTokens other) { + if (other == StreamContinuationTokens.getDefaultInstance()) return this; + if (tokensBuilder_ == null) { + if (!other.tokens_.isEmpty()) { + if (tokens_.isEmpty()) { + tokens_ = other.tokens_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureTokensIsMutable(); + tokens_.addAll(other.tokens_); + } + onChanged(); + } + } else { + if (!other.tokens_.isEmpty()) { + if 
(tokensBuilder_.isEmpty()) { + tokensBuilder_.dispose(); + tokensBuilder_ = null; + tokens_ = other.tokens_; + bitField0_ = (bitField0_ & ~0x00000001); + tokensBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getTokensFieldBuilder() + : null; + } else { + tokensBuilder_.addAllMessages(other.tokens_); + } + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + StreamContinuationTokens parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (StreamContinuationTokens) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private java.util.List tokens_ = java.util.Collections.emptyList(); + + private void ensureTokensIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + tokens_ = new java.util.ArrayList(tokens_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + StreamContinuationToken, + StreamContinuationToken.Builder, + StreamContinuationTokenOrBuilder> + tokensBuilder_; + + /** + * + * + *
+     * List of continuation tokens.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + public java.util.List getTokensList() { + if (tokensBuilder_ == null) { + return java.util.Collections.unmodifiableList(tokens_); + } else { + return tokensBuilder_.getMessageList(); + } + } + /** + * + * + *
+     * List of continuation tokens.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + public int getTokensCount() { + if (tokensBuilder_ == null) { + return tokens_.size(); + } else { + return tokensBuilder_.getCount(); + } + } + /** + * + * + *
+     * List of continuation tokens.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + public StreamContinuationToken getTokens(int index) { + if (tokensBuilder_ == null) { + return tokens_.get(index); + } else { + return tokensBuilder_.getMessage(index); + } + } + /** + * + * + *
+     * List of continuation tokens.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + public Builder setTokens(int index, StreamContinuationToken value) { + if (tokensBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTokensIsMutable(); + tokens_.set(index, value); + onChanged(); + } else { + tokensBuilder_.setMessage(index, value); + } + return this; + } + /** + * + * + *
+     * List of continuation tokens.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + public Builder setTokens(int index, StreamContinuationToken.Builder builderForValue) { + if (tokensBuilder_ == null) { + ensureTokensIsMutable(); + tokens_.set(index, builderForValue.build()); + onChanged(); + } else { + tokensBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * List of continuation tokens.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + public Builder addTokens(StreamContinuationToken value) { + if (tokensBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTokensIsMutable(); + tokens_.add(value); + onChanged(); + } else { + tokensBuilder_.addMessage(value); + } + return this; + } + /** + * + * + *
+     * List of continuation tokens.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + public Builder addTokens(int index, StreamContinuationToken value) { + if (tokensBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTokensIsMutable(); + tokens_.add(index, value); + onChanged(); + } else { + tokensBuilder_.addMessage(index, value); + } + return this; + } + /** + * + * + *
+     * List of continuation tokens.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + public Builder addTokens(StreamContinuationToken.Builder builderForValue) { + if (tokensBuilder_ == null) { + ensureTokensIsMutable(); + tokens_.add(builderForValue.build()); + onChanged(); + } else { + tokensBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * List of continuation tokens.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + public Builder addTokens(int index, StreamContinuationToken.Builder builderForValue) { + if (tokensBuilder_ == null) { + ensureTokensIsMutable(); + tokens_.add(index, builderForValue.build()); + onChanged(); + } else { + tokensBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * List of continuation tokens.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + public Builder addAllTokens(Iterable values) { + if (tokensBuilder_ == null) { + ensureTokensIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, tokens_); + onChanged(); + } else { + tokensBuilder_.addAllMessages(values); + } + return this; + } + /** + * + * + *
+     * List of continuation tokens.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + public Builder clearTokens() { + if (tokensBuilder_ == null) { + tokens_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + tokensBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * List of continuation tokens.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + public Builder removeTokens(int index) { + if (tokensBuilder_ == null) { + ensureTokensIsMutable(); + tokens_.remove(index); + onChanged(); + } else { + tokensBuilder_.remove(index); + } + return this; + } + /** + * + * + *
+     * List of continuation tokens.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + public StreamContinuationToken.Builder getTokensBuilder(int index) { + return getTokensFieldBuilder().getBuilder(index); + } + /** + * + * + *
+     * List of continuation tokens.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + public StreamContinuationTokenOrBuilder getTokensOrBuilder(int index) { + if (tokensBuilder_ == null) { + return tokens_.get(index); + } else { + return tokensBuilder_.getMessageOrBuilder(index); + } + } + /** + * + * + *
+     * List of continuation tokens.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + public java.util.List getTokensOrBuilderList() { + if (tokensBuilder_ != null) { + return tokensBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tokens_); + } + } + /** + * + * + *
+     * List of continuation tokens.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + public StreamContinuationToken.Builder addTokensBuilder() { + return getTokensFieldBuilder().addBuilder(StreamContinuationToken.getDefaultInstance()); + } + /** + * + * + *
+     * List of continuation tokens.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + public StreamContinuationToken.Builder addTokensBuilder(int index) { + return getTokensFieldBuilder() + .addBuilder(index, StreamContinuationToken.getDefaultInstance()); + } + /** + * + * + *
+     * List of continuation tokens.
+     * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + public java.util.List getTokensBuilderList() { + return getTokensFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + StreamContinuationToken, + StreamContinuationToken.Builder, + StreamContinuationTokenOrBuilder> + getTokensFieldBuilder() { + if (tokensBuilder_ == null) { + tokensBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + StreamContinuationToken, + StreamContinuationToken.Builder, + StreamContinuationTokenOrBuilder>( + tokens_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + tokens_ = null; + } + return tokensBuilder_; + } + + @Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.bigtable.v2.StreamContinuationTokens) + } + + // @@protoc_insertion_point(class_scope:google.bigtable.v2.StreamContinuationTokens) + private static final StreamContinuationTokens DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new StreamContinuationTokens(); + } + + public static StreamContinuationTokens getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public StreamContinuationTokens parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new StreamContinuationTokens(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser 
getParserForType() { + return PARSER; + } + + @Override + public StreamContinuationTokens getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamContinuationTokensOrBuilder.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamContinuationTokensOrBuilder.java new file mode 100644 index 0000000000..091c4687e1 --- /dev/null +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamContinuationTokensOrBuilder.java @@ -0,0 +1,76 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/bigtable/v2/data.proto + +package com.google.bigtable.v2; + +public interface StreamContinuationTokensOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.bigtable.v2.StreamContinuationTokens) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * List of continuation tokens.
+   * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + java.util.List getTokensList(); + /** + * + * + *
+   * List of continuation tokens.
+   * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + StreamContinuationToken getTokens(int index); + /** + * + * + *
+   * List of continuation tokens.
+   * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + int getTokensCount(); + /** + * + * + *
+   * List of continuation tokens.
+   * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + java.util.List getTokensOrBuilderList(); + /** + * + * + *
+   * List of continuation tokens.
+   * 
+ * + * repeated .google.bigtable.v2.StreamContinuationToken tokens = 1; + */ + StreamContinuationTokenOrBuilder getTokensOrBuilder(int index); +} diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamPartition.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamPartition.java new file mode 100644 index 0000000000..612515c39a --- /dev/null +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamPartition.java @@ -0,0 +1,712 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/bigtable/v2/data.proto + +package com.google.bigtable.v2; + +/** + * + * + *
+ * NOTE: This API is not generally available. Users must be allowlisted.
+ * A partition of a change stream.
+ * 
+ * + * Protobuf type {@code google.bigtable.v2.StreamPartition} + */ +public final class StreamPartition extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.bigtable.v2.StreamPartition) + StreamPartitionOrBuilder { + private static final long serialVersionUID = 0L; + // Use StreamPartition.newBuilder() to construct. + private StreamPartition(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StreamPartition() {} + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new StreamPartition(); + } + + @Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private StreamPartition( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + RowRange.Builder subBuilder = null; + if (rowRange_ != null) { + subBuilder = rowRange_.toBuilder(); + } + rowRange_ = input.readMessage(RowRange.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(rowRange_); + rowRange_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return DataProto.internal_static_google_bigtable_v2_StreamPartition_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return DataProto.internal_static_google_bigtable_v2_StreamPartition_fieldAccessorTable + .ensureFieldAccessorsInitialized(StreamPartition.class, Builder.class); + } + + public static final int ROW_RANGE_FIELD_NUMBER = 1; + private RowRange rowRange_; + /** + * + * + *
+   * The row range covered by this partition and is specified by
+   * [`start_key_closed`, `end_key_open`).
+   * 
+ * + * .google.bigtable.v2.RowRange row_range = 1; + * + * @return Whether the rowRange field is set. + */ + @Override + public boolean hasRowRange() { + return rowRange_ != null; + } + /** + * + * + *
+   * The row range covered by this partition and is specified by
+   * [`start_key_closed`, `end_key_open`).
+   * 
+ * + * .google.bigtable.v2.RowRange row_range = 1; + * + * @return The rowRange. + */ + @Override + public RowRange getRowRange() { + return rowRange_ == null ? RowRange.getDefaultInstance() : rowRange_; + } + /** + * + * + *
+   * The row range covered by this partition and is specified by
+   * [`start_key_closed`, `end_key_open`).
+   * 
+ * + * .google.bigtable.v2.RowRange row_range = 1; + */ + @Override + public RowRangeOrBuilder getRowRangeOrBuilder() { + return getRowRange(); + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (rowRange_ != null) { + output.writeMessage(1, getRowRange()); + } + unknownFields.writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (rowRange_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getRowRange()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof StreamPartition)) { + return super.equals(obj); + } + StreamPartition other = (StreamPartition) obj; + + if (hasRowRange() != other.hasRowRange()) return false; + if (hasRowRange()) { + if (!getRowRange().equals(other.getRowRange())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasRowRange()) { + hash = (37 * hash) + ROW_RANGE_FIELD_NUMBER; + hash = (53 * hash) + getRowRange().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static StreamPartition parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
StreamPartition parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static StreamPartition parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static StreamPartition parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static StreamPartition parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static StreamPartition parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static StreamPartition parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static StreamPartition parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static StreamPartition parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static StreamPartition parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static StreamPartition parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static StreamPartition parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(StreamPartition prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * NOTE: This API is not generally available. Users must be allowlisted.
+   * A partition of a change stream.
+   * 
+ * + * Protobuf type {@code google.bigtable.v2.StreamPartition} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.bigtable.v2.StreamPartition) + StreamPartitionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return DataProto.internal_static_google_bigtable_v2_StreamPartition_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return DataProto.internal_static_google_bigtable_v2_StreamPartition_fieldAccessorTable + .ensureFieldAccessorsInitialized(StreamPartition.class, Builder.class); + } + + // Construct using com.google.bigtable.v2.StreamPartition.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @Override + public Builder clear() { + super.clear(); + if (rowRangeBuilder_ == null) { + rowRange_ = null; + } else { + rowRange_ = null; + rowRangeBuilder_ = null; + } + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return DataProto.internal_static_google_bigtable_v2_StreamPartition_descriptor; + } + + @Override + public StreamPartition getDefaultInstanceForType() { + return StreamPartition.getDefaultInstance(); + } + + @Override + public StreamPartition build() { + StreamPartition result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @Override + public StreamPartition buildPartial() { + StreamPartition result = new StreamPartition(this); + if (rowRangeBuilder_ == null) { + result.rowRange_ = rowRange_; + } else { + result.rowRange_ = rowRangeBuilder_.build(); + } 
+ onBuilt(); + return result; + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof StreamPartition) { + return mergeFrom((StreamPartition) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(StreamPartition other) { + if (other == StreamPartition.getDefaultInstance()) return this; + if (other.hasRowRange()) { + mergeRowRange(other.getRowRange()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + StreamPartition parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (StreamPartition) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); 
+ } + } + return this; + } + + private RowRange rowRange_; + private com.google.protobuf.SingleFieldBuilderV3 + rowRangeBuilder_; + /** + * + * + *
+     * The row range covered by this partition and is specified by
+     * [`start_key_closed`, `end_key_open`).
+     * 
+ * + * .google.bigtable.v2.RowRange row_range = 1; + * + * @return Whether the rowRange field is set. + */ + public boolean hasRowRange() { + return rowRangeBuilder_ != null || rowRange_ != null; + } + /** + * + * + *
+     * The row range covered by this partition and is specified by
+     * [`start_key_closed`, `end_key_open`).
+     * 
+ * + * .google.bigtable.v2.RowRange row_range = 1; + * + * @return The rowRange. + */ + public RowRange getRowRange() { + if (rowRangeBuilder_ == null) { + return rowRange_ == null ? RowRange.getDefaultInstance() : rowRange_; + } else { + return rowRangeBuilder_.getMessage(); + } + } + /** + * + * + *
+     * The row range covered by this partition and is specified by
+     * [`start_key_closed`, `end_key_open`).
+     * 
+ * + * .google.bigtable.v2.RowRange row_range = 1; + */ + public Builder setRowRange(RowRange value) { + if (rowRangeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rowRange_ = value; + onChanged(); + } else { + rowRangeBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * The row range covered by this partition and is specified by
+     * [`start_key_closed`, `end_key_open`).
+     * 
+ * + * .google.bigtable.v2.RowRange row_range = 1; + */ + public Builder setRowRange(RowRange.Builder builderForValue) { + if (rowRangeBuilder_ == null) { + rowRange_ = builderForValue.build(); + onChanged(); + } else { + rowRangeBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * The row range covered by this partition and is specified by
+     * [`start_key_closed`, `end_key_open`).
+     * 
+ * + * .google.bigtable.v2.RowRange row_range = 1; + */ + public Builder mergeRowRange(RowRange value) { + if (rowRangeBuilder_ == null) { + if (rowRange_ != null) { + rowRange_ = RowRange.newBuilder(rowRange_).mergeFrom(value).buildPartial(); + } else { + rowRange_ = value; + } + onChanged(); + } else { + rowRangeBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * The row range covered by this partition and is specified by
+     * [`start_key_closed`, `end_key_open`).
+     * 
+ * + * .google.bigtable.v2.RowRange row_range = 1; + */ + public Builder clearRowRange() { + if (rowRangeBuilder_ == null) { + rowRange_ = null; + onChanged(); + } else { + rowRange_ = null; + rowRangeBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * The row range covered by this partition and is specified by
+     * [`start_key_closed`, `end_key_open`).
+     * 
+ * + * .google.bigtable.v2.RowRange row_range = 1; + */ + public RowRange.Builder getRowRangeBuilder() { + + onChanged(); + return getRowRangeFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * The row range covered by this partition and is specified by
+     * [`start_key_closed`, `end_key_open`).
+     * 
+ * + * .google.bigtable.v2.RowRange row_range = 1; + */ + public RowRangeOrBuilder getRowRangeOrBuilder() { + if (rowRangeBuilder_ != null) { + return rowRangeBuilder_.getMessageOrBuilder(); + } else { + return rowRange_ == null ? RowRange.getDefaultInstance() : rowRange_; + } + } + /** + * + * + *
+     * The row range covered by this partition and is specified by
+     * [`start_key_closed`, `end_key_open`).
+     * 
+ * + * .google.bigtable.v2.RowRange row_range = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3 + getRowRangeFieldBuilder() { + if (rowRangeBuilder_ == null) { + rowRangeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + RowRange, RowRange.Builder, RowRangeOrBuilder>( + getRowRange(), getParentForChildren(), isClean()); + rowRange_ = null; + } + return rowRangeBuilder_; + } + + @Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.bigtable.v2.StreamPartition) + } + + // @@protoc_insertion_point(class_scope:google.bigtable.v2.StreamPartition) + private static final StreamPartition DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new StreamPartition(); + } + + public static StreamPartition getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public StreamPartition parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new StreamPartition(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public StreamPartition getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamPartitionOrBuilder.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamPartitionOrBuilder.java new file 
mode 100644 index 0000000000..0ea28ebae1 --- /dev/null +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/StreamPartitionOrBuilder.java @@ -0,0 +1,63 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/bigtable/v2/data.proto + +package com.google.bigtable.v2; + +public interface StreamPartitionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.bigtable.v2.StreamPartition) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * The row range covered by this partition and is specified by
+   * [`start_key_closed`, `end_key_open`).
+   * 
+ * + * .google.bigtable.v2.RowRange row_range = 1; + * + * @return Whether the rowRange field is set. + */ + boolean hasRowRange(); + /** + * + * + *
+   * The row range covered by this partition and is specified by
+   * [`start_key_closed`, `end_key_open`).
+   * 
+ * + * .google.bigtable.v2.RowRange row_range = 1; + * + * @return The rowRange. + */ + RowRange getRowRange(); + /** + * + * + *
+   * The row range covered by this partition and is specified by
+   * [`start_key_closed`, `end_key_open`).
+   * 
+ * + * .google.bigtable.v2.RowRange row_range = 1; + */ + RowRangeOrBuilder getRowRangeOrBuilder(); +} diff --git a/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/bigtable.proto b/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/bigtable.proto index 215b573cbd..a99bb410a1 100644 --- a/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/bigtable.proto +++ b/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/bigtable.proto @@ -22,6 +22,8 @@ import "google/api/field_behavior.proto"; import "google/api/resource.proto"; import "google/api/routing.proto"; import "google/bigtable/v2/data.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; import "google/rpc/status.proto"; @@ -198,6 +200,34 @@ service Bigtable { option (google.api.method_signature) = "table_name,row_key,rules"; option (google.api.method_signature) = "table_name,row_key,rules,app_profile_id"; } + + // NOTE: This API is not generally available. Users must be allowlisted. + // Returns the current list of partitions that make up the table's + // change stream. The union of partitions will cover the entire keyspace. + // Partitions can be read with `ReadChangeStream`. + rpc ListChangeStreamPartitions(ListChangeStreamPartitionsRequest) + returns (stream ListChangeStreamPartitionsResponse) { + option (google.api.http) = { + post: "/v2/{table_name=projects/*/instances/*/tables/*}:listChangeStreamPartitions" + body: "*" + }; + option (google.api.method_signature) = "table_name"; + option (google.api.method_signature) = "table_name,app_profile_id"; + } + + // NOTE: This API is not generally available. Users must be allowlisted. + // Reads changes from a table's change stream. Changes will + // reflect both user-initiated mutations and mutations that are caused by + // garbage collection. 
+ rpc ReadChangeStream(ReadChangeStreamRequest) + returns (stream ReadChangeStreamResponse) { + option (google.api.http) = { + post: "/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream" + body: "*" + }; + option (google.api.method_signature) = "table_name"; + option (google.api.method_signature) = "table_name,app_profile_id"; + } } // Request message for Bigtable.ReadRows. @@ -526,3 +556,219 @@ message ReadModifyWriteRowResponse { // A Row containing the new contents of all cells modified by the request. Row row = 1; } + +// NOTE: This API is not generally available. Users must be allowlisted. +// Request message for Bigtable.ListChangeStreamPartitions. +message ListChangeStreamPartitionsRequest { + // Required. The unique name of the table from which to get change stream + // partitions. Values are of the form + // `projects//instances//tables/`. + // Change streaming must be enabled on the table. + string table_name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtableadmin.googleapis.com/Table" + } + ]; + + // This value specifies routing for replication. If not specified, the + // "default" application profile will be used. + // Single cluster routing must be configured on the profile. + string app_profile_id = 2; +} + +// NOTE: This API is not generally available. Users must be allowlisted. +// Response message for Bigtable.ListChangeStreamPartitions. +message ListChangeStreamPartitionsResponse { + // A partition of the change stream. + StreamPartition partition = 1; +} + +// NOTE: This API is not generally available. Users must be allowlisted. +// Request message for Bigtable.ReadChangeStream. +message ReadChangeStreamRequest { + // Required. The unique name of the table from which to read a change stream. + // Values are of the form + // `projects//instances//tables/
`. + // Change streaming must be enabled on the table. + string table_name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtableadmin.googleapis.com/Table" + } + ]; + + // This value specifies routing for replication. If not specified, the + // "default" application profile will be used. + // Single cluster routing must be configured on the profile. + string app_profile_id = 2; + + // The partition to read changes from. + StreamPartition partition = 3; + + // Options for describing where we want to start reading from the stream. + oneof start_from { + // Start reading the stream at the specified timestamp. This timestamp must + // be within the change stream retention period, less than or equal to the + // current time, and after change stream creation, whichever is greater. + // This value is inclusive and will be truncated to microsecond granularity. + google.protobuf.Timestamp start_time = 4; + + // Tokens that describe how to resume reading a stream where reading + // previously left off. If specified, changes will be read starting at the + // the position. Tokens are delivered on the stream as part of `Heartbeat` + // and `CloseStream` messages. + // + // If a single token is provided, the token’s partition must exactly match + // the request’s partition. If multiple tokens are provided, as in the case + // of a partition merge, the union of the token partitions must exactly + // cover the request’s partition. Otherwise, INVALID_ARGUMENT will be + // returned. + StreamContinuationTokens continuation_tokens = 6; + } + + // If specified, OK will be returned when the stream advances beyond + // this time. Otherwise, changes will be continuously delivered on the stream. + // This value is inclusive and will be truncated to microsecond granularity. + google.protobuf.Timestamp end_time = 5; + + // If specified, the duration between `Heartbeat` messages on the stream. + // Otherwise, defaults to 5 seconds. 
+ google.protobuf.Duration heartbeat_duration = 7; +} + +// NOTE: This API is not generally available. Users must be allowlisted. +// Response message for Bigtable.ReadChangeStream. +message ReadChangeStreamResponse { + // A partial or complete mutation. + message MutationChunk { + // Information about the chunking of this mutation. + // Only `SetCell` mutations can be chunked, and all chunks for a `SetCell` + // will be delivered contiguously with no other mutation types interleaved. + message ChunkInfo { + // The total value size of all the chunks that make up the `SetCell`. + int32 chunked_value_size = 1; + + // The byte offset of this chunk into the total value size of the + // mutation. + int32 chunked_value_offset = 2; + + // When true, this is the last chunk of a chunked `SetCell`. + bool last_chunk = 3; + } + + // If set, then the mutation is a `SetCell` with a chunked value across + // multiple messages. + ChunkInfo chunk_info = 1; + + // If this is a continuation of a chunked message (`chunked_value_offset` > + // 0), ignore all fields except the `SetCell`'s value and merge it with + // the previous message by concatenating the value fields. + Mutation mutation = 2; + } + + // A message corresponding to one or more mutations to the partition + // being streamed. A single logical `DataChange` message may also be split + // across a sequence of multiple individual messages. Messages other than + // the first in a sequence will only have the `type` and `chunks` fields + // populated, with the final message in the sequence also containing `done` + // set to true. + message DataChange { + // The type of mutation. + enum Type { + // The type is unspecified. + TYPE_UNSPECIFIED = 0; + + // A user-initiated mutation. + USER = 1; + + // A system-initiated mutation as part of garbage collection. + // https://cloud.google.com/bigtable/docs/garbage-collection + GARBAGE_COLLECTION = 2; + + // This is a continuation of a multi-message change. 
+ CONTINUATION = 3; + } + + // The type of the mutation. + Type type = 1; + + // The cluster where the mutation was applied. + // Not set when `type` is `GARBAGE_COLLECTION`. + string source_cluster_id = 2; + + // The row key for all mutations that are part of this `DataChange`. + // If the `DataChange` is chunked across multiple messages, then this field + // will only be set for the first message. + bytes row_key = 3; + + // The timestamp at which the mutation was applied on the Bigtable server. + google.protobuf.Timestamp commit_timestamp = 4; + + // A value that lets stream consumers reconstruct Bigtable's + // conflict resolution semantics. + // https://cloud.google.com/bigtable/docs/writes#conflict-resolution + // In the event that the same row key, column family, column qualifier, + // timestamp are modified on different clusters at the same + // `commit_timestamp`, the mutation with the larger `tiebreaker` will be the + // one chosen for the eventually consistent state of the system. + int32 tiebreaker = 5; + + // The mutations associated with this change to the partition. + // May contain complete mutations or chunks of a multi-message chunked + // `DataChange` record. + repeated MutationChunk chunks = 6; + + // When true, indicates that the entire `DataChange` has been read + // and the client can safely process the message. + bool done = 8; + + // An encoded position for this stream's partition to restart reading from. + // This token is for the StreamPartition from the request. + string token = 9; + + // A commit timestamp that is lower than or equal to any timestamp for a + // record that will be delivered in the future on the stream. For an example + // usage see https://beam.apache.org/documentation/basics/#watermarks + google.protobuf.Timestamp low_watermark = 10; + } + + // A periodic message with information that can be used to checkpoint + // the state of a stream. 
+ message Heartbeat { + // A token that can be provided to a subsequent `ReadChangeStream` call + // to pick up reading at the current stream position. + StreamContinuationToken continuation_token = 1; + + // A commit timestamp that is lower than or equal to any timestamp for a + // record that will be delivered in the future on the stream. For an example + // usage see https://beam.apache.org/documentation/basics/#watermarks + google.protobuf.Timestamp low_watermark = 2; + } + + // A message indicating that the client should stop reading from the stream. + // If status is OK and `continuation_tokens` is empty, the stream has finished + // (for example if there was an `end_time` specified). + // If `continuation_tokens` is present, then a change in partitioning requires + // the client to open a new stream for each token to resume reading. + message CloseStream { + // The status of the stream. + google.rpc.Status status = 1; + + // If non-empty, contains the information needed to start reading the new + // partition(s) that contain segments of this partition's row range. + repeated StreamContinuationToken continuation_tokens = 2; + } + + // The data or control message on the stream. + oneof stream_record { + // A mutation to the partition. + DataChange data_change = 1; + + // A periodic heartbeat message. + Heartbeat heartbeat = 2; + + // An indication that the stream should be closed. + CloseStream close_stream = 3; + } +} diff --git a/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/data.proto b/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/data.proto index 9e5a05c2ea..4881526e5a 100644 --- a/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/data.proto +++ b/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/data.proto @@ -533,3 +533,30 @@ message ReadModifyWriteRule { int64 increment_amount = 4; } } + +// NOTE: This API is not generally available. Users must be allowlisted. 
+// A partition of a change stream. +message StreamPartition { + // The row range covered by this partition and is specified by + // [`start_key_closed`, `end_key_open`). + RowRange row_range = 1; +} + +// NOTE: This API is not generally available. Users must be allowlisted. +// The information required to continue reading the data from multiple +// `StreamPartitions` from where a previous read left off. +message StreamContinuationTokens { + // List of continuation tokens. + repeated StreamContinuationToken tokens = 1; +} + +// NOTE: This API is not generally available. Users must be allowlisted. +// The information required to continue reading the data from a +// `StreamPartition` from where a previous read left off. +message StreamContinuationToken { + // The partition that this token applies to. + StreamPartition partition = 1; + + // An encoded position in the stream to restart reading from. + string token = 2; +} From c529f192f3ec032c1863b44cddc75ca583117110 Mon Sep 17 00:00:00 2001 From: tengzhonger <109308630+tengzhonger@users.noreply.github.com> Date: Wed, 20 Jul 2022 14:36:30 -0400 Subject: [PATCH 02/13] feat: Add ListChangeStreamPartitions callable (#1312) * feat: Add ListChangeStreamPartitions callable * feat: Change return type of ListChangeStreamPartitions to RowRange * feat: Fix format for ListChangeStreamPartitions * fix: Address comments for ListChangeStreamPartitionsCallable * feat: Add comments for IntervalApi for ListChangeStreamPartitions * feat: Ignore renaming of ReadRowsConvertExceptionCallable Co-authored-by: Teng Zhong --- .../clirr-ignored-differences.xml | 5 + .../bigtable/data/v2/BigtableDataClient.java | 138 ++++++++++++++++++ ...va => ConvertStreamExceptionCallable.java} | 27 ++-- .../data/v2/stub/EnhancedBigtableStub.java | 85 ++++++++++- .../v2/stub/EnhancedBigtableStubSettings.java | 35 +++++ ...istChangeStreamPartitionsUserCallable.java | 93 ++++++++++++ .../data/v2/BigtableDataClientTests.java | 34 +++++ 
.../ConvertStreamExceptionCallableTest.java | 76 ++++++++++ .../EnhancedBigtableStubSettingsTest.java | 1 + ...hangeStreamPartitionsUserCallableTest.java | 87 +++++++++++ 10 files changed, 566 insertions(+), 15 deletions(-) rename google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/{readrows/ReadRowsConvertExceptionCallable.java => ConvertStreamExceptionCallable.java} (69%) create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ListChangeStreamPartitionsUserCallable.java create mode 100644 google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/ConvertStreamExceptionCallableTest.java create mode 100644 google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ListChangeStreamPartitionsUserCallableTest.java diff --git a/google-cloud-bigtable/clirr-ignored-differences.xml b/google-cloud-bigtable/clirr-ignored-differences.xml index 588327d0de..3fa8f3ee1e 100644 --- a/google-cloud-bigtable/clirr-ignored-differences.xml +++ b/google-cloud-bigtable/clirr-ignored-differences.xml @@ -39,6 +39,11 @@ 8001 com/google/cloud/bigtable/data/v2/stub/metrics/CompositeTracerFactory + + + 8001 + com/google/cloud/bigtable/data/v2/stub/readrows/ReadRowsConvertExceptionCallable + 8001 diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java index ce9a57fa7e..38bc4dc811 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java @@ -29,6 +29,7 @@ import com.google.api.gax.rpc.ServerStream; import com.google.api.gax.rpc.ServerStreamingCallable; import com.google.api.gax.rpc.UnaryCallable; +import com.google.bigtable.v2.RowRange; import 
com.google.cloud.bigtable.data.v2.models.BulkMutation; import com.google.cloud.bigtable.data.v2.models.ConditionalRowMutation; import com.google.cloud.bigtable.data.v2.models.Filters; @@ -1489,6 +1490,143 @@ public UnaryCallable readModifyWriteRowCallable() { return stub.readModifyWriteRowCallable(); } + /** + * Convenience method for synchronously streaming the partitions of a table. The returned + * ServerStream instance is not threadsafe, it can only be used from single thread. + * + *

Sample code: + * + *

{@code
+   * try (BigtableDataClient bigtableDataClient = BigtableDataClient.create("[PROJECT]", "[INSTANCE]")) {
+   *   String tableId = "[TABLE]";
+   *
+   *   try {
+   *     ServerStream<RowRange> stream = bigtableDataClient.listChangeStreamPartitions(tableId);
+   *     int count = 0;
+   *
+   *     // Iterator style
+   *     for (RowRange partition : stream) {
+   *       if (++count > 10) {
+   *         stream.cancel();
+   *         break;
+   *       }
+   *       // Do something with partition
+   *     }
+   *   } catch (NotFoundException e) {
+   *     System.out.println("Tried to read a non-existent table");
+   *   } catch (RuntimeException e) {
+   *     e.printStackTrace();
+   *   }
+   * }
+   * }
+ * + * @see ServerStreamingCallable For call styles. + */ + @InternalApi("Used in Changestream beam pipeline.") + public ServerStream listChangeStreamPartitions(String tableId) { + return listChangeStreamPartitionsCallable().call(tableId); + } + + /** + * Convenience method for asynchronously streaming the partitions of a table. + * + *

Sample code: + * + *

{@code
+   * try (BigtableDataClient bigtableDataClient = BigtableDataClient.create("[PROJECT]", "[INSTANCE]")) {
+   *   String tableId = "[TABLE]";
+   *
+   *   bigtableDataClient.listChangeStreamPartitionsAsync(tableId, new ResponseObserver<RowRange>() {
+   *     StreamController controller;
+   *     int count = 0;
+   *
+   *     public void onStart(StreamController controller) {
+   *       this.controller = controller;
+   *     }
+   *     public void onResponse(RowRange partition) {
+   *       if (++count > 10) {
+   *         controller.cancel();
+   *         return;
+   *       }
+   *       // Do something with partition
+   *     }
+   *     public void onError(Throwable t) {
+   *       if (t instanceof NotFoundException) {
+   *         System.out.println("Tried to read a non-existent table");
+   *       } else {
+   *         t.printStackTrace();
+   *       }
+   *     }
+   *     public void onComplete() {
+   *       // Handle stream completion
+   *     }
+   *   });
+   * }
+   * }
+ */ + @InternalApi("Used in Changestream beam pipeline.") + public void listChangeStreamPartitionsAsync(String tableId, ResponseObserver observer) { + listChangeStreamPartitionsCallable().call(tableId, observer); + } + + /** + * Streams back the results of the query. The returned callable object allows for customization of + * api invocation. + * + *

Sample code: + * + *

{@code
+   * try (BigtableDataClient bigtableDataClient = BigtableDataClient.create("[PROJECT]", "[INSTANCE]")) {
+   *   String tableId = "[TABLE]";
+   *
+   *   // Iterator style
+   *   try {
+   *     for(RowRange partition : bigtableDataClient.listChangeStreamPartitionsCallable().call(tableId)) {
+   *       // Do something with partition
+   *     }
+   *   } catch (NotFoundException e) {
+   *     System.out.println("Tried to read a non-existent table");
+   *   } catch (RuntimeException e) {
+   *     e.printStackTrace();
+   *   }
+   *
+   *   // Sync style
+   *   try {
+   *     List<RowRange> partitions = bigtableDataClient.listChangeStreamPartitionsCallable().all().call(tableId);
+   *   } catch (NotFoundException e) {
+   *     System.out.println("Tried to read a non-existent table");
+   *   } catch (RuntimeException e) {
+   *     e.printStackTrace();
+   *   }
+   *
+   *   // Point look up
+   *   ApiFuture<RowRange> partitionFuture =
+   *     bigtableDataClient.listChangeStreamPartitionsCallable().first().futureCall(tableId);
+   *
+   *   ApiFutures.addCallback(partitionFuture, new ApiFutureCallback<RowRange>() {
+   *     public void onFailure(Throwable t) {
+   *       if (t instanceof NotFoundException) {
+   *         System.out.println("Tried to read a non-existent table");
+   *       } else {
+   *         t.printStackTrace();
+   *       }
+   *     }
+   *     public void onSuccess(RowRange result) {
+   *       System.out.println("Got partition: " + result);
+   *     }
+   *   }, MoreExecutors.directExecutor());
+   *
+   *   // etc
+   * }
+   * }
+ * + * @see ServerStreamingCallable For call styles. + */ + @InternalApi("Used in Changestream beam pipeline.") + public ServerStreamingCallable listChangeStreamPartitionsCallable() { + return stub.listChangeStreamPartitionsCallable(); + } + /** Close the clients and releases all associated resources. */ @Override public void close() { diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/readrows/ReadRowsConvertExceptionCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/ConvertStreamExceptionCallable.java similarity index 69% rename from google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/readrows/ReadRowsConvertExceptionCallable.java rename to google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/ConvertStreamExceptionCallable.java index 0c58f66441..55a0d390fb 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/readrows/ReadRowsConvertExceptionCallable.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/ConvertStreamExceptionCallable.java @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.google.cloud.bigtable.data.v2.stub.readrows; +package com.google.cloud.bigtable.data.v2.stub; import com.google.api.core.InternalApi; import com.google.api.gax.rpc.ApiCallContext; @@ -27,29 +27,30 @@ * This callable converts the "Received rst stream" exception into a retryable {@link ApiException}. 
*/ @InternalApi -public final class ReadRowsConvertExceptionCallable - extends ServerStreamingCallable { +public final class ConvertStreamExceptionCallable + extends ServerStreamingCallable { - private final ServerStreamingCallable innerCallable; + private final ServerStreamingCallable innerCallable; - public ReadRowsConvertExceptionCallable( - ServerStreamingCallable innerCallable) { + public ConvertStreamExceptionCallable( + ServerStreamingCallable innerCallable) { this.innerCallable = innerCallable; } @Override public void call( - ReadRowsRequest request, ResponseObserver responseObserver, ApiCallContext context) { - ReadRowsConvertExceptionResponseObserver observer = - new ReadRowsConvertExceptionResponseObserver<>(responseObserver); + RequestT request, ResponseObserver responseObserver, ApiCallContext context) { + ConvertStreamExceptionResponseObserver observer = + new ConvertStreamExceptionResponseObserver<>(responseObserver); innerCallable.call(request, observer, context); } - private class ReadRowsConvertExceptionResponseObserver implements ResponseObserver { + private class ConvertStreamExceptionResponseObserver + implements ResponseObserver { - private final ResponseObserver outerObserver; + private final ResponseObserver outerObserver; - ReadRowsConvertExceptionResponseObserver(ResponseObserver outerObserver) { + ConvertStreamExceptionResponseObserver(ResponseObserver outerObserver) { this.outerObserver = outerObserver; } @@ -59,7 +60,7 @@ public void onStart(StreamController controller) { } @Override - public void onResponse(RowT response) { + public void onResponse(ResponseT response) { outerObserver.onResponse(response); } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java index ec237aabf7..7d2cd85b65 100644 --- 
a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java @@ -47,6 +47,8 @@ import com.google.bigtable.v2.BigtableGrpc; import com.google.bigtable.v2.CheckAndMutateRowRequest; import com.google.bigtable.v2.CheckAndMutateRowResponse; +import com.google.bigtable.v2.ListChangeStreamPartitionsRequest; +import com.google.bigtable.v2.ListChangeStreamPartitionsResponse; import com.google.bigtable.v2.MutateRowRequest; import com.google.bigtable.v2.MutateRowResponse; import com.google.bigtable.v2.MutateRowsRequest; @@ -55,6 +57,7 @@ import com.google.bigtable.v2.ReadModifyWriteRowResponse; import com.google.bigtable.v2.ReadRowsRequest; import com.google.bigtable.v2.ReadRowsResponse; +import com.google.bigtable.v2.RowRange; import com.google.bigtable.v2.SampleRowKeysRequest; import com.google.bigtable.v2.SampleRowKeysResponse; import com.google.cloud.bigtable.Version; @@ -70,6 +73,7 @@ import com.google.cloud.bigtable.data.v2.models.RowAdapter; import com.google.cloud.bigtable.data.v2.models.RowMutation; import com.google.cloud.bigtable.data.v2.models.RowMutationEntry; +import com.google.cloud.bigtable.data.v2.stub.changestream.ListChangeStreamPartitionsUserCallable; import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracerStreamingCallable; import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracerUnaryCallable; import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTracerFactory; @@ -84,7 +88,6 @@ import com.google.cloud.bigtable.data.v2.stub.mutaterows.MutateRowsRetryingCallable; import com.google.cloud.bigtable.data.v2.stub.readrows.FilterMarkerRowsCallable; import com.google.cloud.bigtable.data.v2.stub.readrows.ReadRowsBatchingDescriptor; -import com.google.cloud.bigtable.data.v2.stub.readrows.ReadRowsConvertExceptionCallable; import 
com.google.cloud.bigtable.data.v2.stub.readrows.ReadRowsResumptionStrategy; import com.google.cloud.bigtable.data.v2.stub.readrows.ReadRowsRetryCompletedCallable; import com.google.cloud.bigtable.data.v2.stub.readrows.ReadRowsUserCallable; @@ -142,6 +145,8 @@ public class EnhancedBigtableStub implements AutoCloseable { private final UnaryCallable checkAndMutateRowCallable; private final UnaryCallable readModifyWriteRowCallable; + private final ServerStreamingCallable listChangeStreamPartitionsCallable; + public static EnhancedBigtableStub create(EnhancedBigtableStubSettings settings) throws IOException { settings = finalizeSettings(settings, Tags.getTagger(), Stats.getStatsRecorder()); @@ -284,6 +289,7 @@ public EnhancedBigtableStub(EnhancedBigtableStubSettings settings, ClientContext bulkMutateRowsCallable = createBulkMutateRowsCallable(); checkAndMutateRowCallable = createCheckAndMutateRowCallable(); readModifyWriteRowCallable = createReadModifyWriteRowCallable(); + listChangeStreamPartitionsCallable = createListChangeStreamPartitionsCallable(); } // @@ -410,7 +416,7 @@ public Map extract(ReadRowsRequest readRowsRequest) { // should be treated similar to UNAVAILABLE. However, this exception has an INTERNAL error code // which by default is not retryable. Convert the exception so it can be retried in the client. ServerStreamingCallable convertException = - new ReadRowsConvertExceptionCallable<>(withStatsHeaders); + new ConvertStreamExceptionCallable<>(withStatsHeaders); ServerStreamingCallable merging = new RowMergingCallable<>(convertException, rowAdapter); @@ -798,6 +804,76 @@ public Map extract(ReadModifyWriteRowRequest request) { methodName, new ReadModifyWriteRowCallable(retrying, requestContext)); } + /** + * Creates a callable chain to handle streaming ListChangeStreamPartitions RPCs. The chain will: + * + *
    + *
  • Convert a String format tableId into a {@link + * com.google.bigtable.v2.ListChangeStreamPartitionsRequest} and dispatch the RPC. + *
  • Upon receiving the response stream, it will convert the {@link + * com.google.bigtable.v2.ListChangeStreamPartitionsResponse}s into {@link RowRange}. + *
+ */ + private ServerStreamingCallable createListChangeStreamPartitionsCallable() { + ServerStreamingCallable + base = + GrpcRawCallableFactory.createServerStreamingCallable( + GrpcCallSettings + . + newBuilder() + .setMethodDescriptor(BigtableGrpc.getListChangeStreamPartitionsMethod()) + .setParamsExtractor( + new RequestParamsExtractor() { + @Override + public Map extract( + ListChangeStreamPartitionsRequest listChangeStreamPartitionsRequest) { + return ImmutableMap.of( + "table_name", + listChangeStreamPartitionsRequest.getTableName(), + "app_profile_id", + listChangeStreamPartitionsRequest.getAppProfileId()); + } + }) + .build(), + settings.listChangeStreamPartitionsSettings().getRetryableCodes()); + + ServerStreamingCallable userCallable = + new ListChangeStreamPartitionsUserCallable(base, requestContext); + + ServerStreamingCallable withStatsHeaders = + new StatsHeadersServerStreamingCallable<>(userCallable); + + // Sometimes ListChangeStreamPartitions connections are disconnected via an RST frame. This + // error is transient and should be treated similar to UNAVAILABLE. However, this exception + // has an INTERNAL error code which by default is not retryable. Convert the exception so it + // can be retried in the client. + ServerStreamingCallable convertException = + new ConvertStreamExceptionCallable<>(withStatsHeaders); + + // Copy idle timeout settings for watchdog. 
+ ServerStreamingCallSettings innerSettings = + ServerStreamingCallSettings.newBuilder() + .setRetryableCodes(settings.listChangeStreamPartitionsSettings().getRetryableCodes()) + .setRetrySettings(settings.listChangeStreamPartitionsSettings().getRetrySettings()) + .setIdleTimeout(settings.listChangeStreamPartitionsSettings().getIdleTimeout()) + .build(); + + ServerStreamingCallable watched = + Callables.watched(convertException, innerSettings, clientContext); + + ServerStreamingCallable withBigtableTracer = + new BigtableTracerStreamingCallable<>(watched); + + ServerStreamingCallable retrying = + Callables.retrying(withBigtableTracer, innerSettings, clientContext); + + SpanName span = getSpanName("ListChangeStreamPartitions"); + ServerStreamingCallable traced = + new TracedServerStreamingCallable<>(retrying, clientContext.getTracerFactory(), span); + + return traced.withDefaultCallContext(clientContext.getDefaultCallContext()); + } + /** * Wraps a callable chain in a user presentable callable that will inject the default call context * and trace the call. @@ -854,6 +930,11 @@ public UnaryCallable checkAndMutateRowCallable( public UnaryCallable readModifyWriteRowCallable() { return readModifyWriteRowCallable; } + + /** Returns a streaming list change stream partitions callable */ + public ServerStreamingCallable listChangeStreamPartitionsCallable() { + return listChangeStreamPartitionsCallable; + } //
private SpanName getSpanName(String methodName) { diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java index 395ba52b08..83f0445bc5 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java @@ -33,6 +33,7 @@ import com.google.api.gax.rpc.TransportChannelProvider; import com.google.api.gax.rpc.UnaryCallSettings; import com.google.auth.Credentials; +import com.google.bigtable.v2.RowRange; import com.google.cloud.bigtable.Version; import com.google.cloud.bigtable.data.v2.models.ConditionalRowMutation; import com.google.cloud.bigtable.data.v2.models.KeyOffset; @@ -137,6 +138,22 @@ public class EnhancedBigtableStubSettings extends StubSettings LIST_CHANGE_STREAM_PARTITIONS_RETRY_CODES = + ImmutableSet.builder().addAll(IDEMPOTENT_RETRY_CODES).add(Code.ABORTED).build(); + + private static final RetrySettings LIST_CHANGE_STREAM_PARTITIONS_RETRY_SETTINGS = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(10)) + .setRetryDelayMultiplier(2.0) + .setMaxRetryDelay(Duration.ofMinutes(1)) + .setMaxAttempts(10) + .setJittered(true) + .setInitialRpcTimeout(Duration.ofMinutes(1)) + .setRpcTimeoutMultiplier(2.0) + .setMaxRpcTimeout(Duration.ofMinutes(10)) + .setTotalTimeout(Duration.ofMinutes(60)) + .build(); + /** * Scopes that are equivalent to JWT's audience. 
* @@ -174,6 +191,8 @@ public class EnhancedBigtableStubSettings extends StubSettings checkAndMutateRowSettings; private final UnaryCallSettings readModifyWriteRowSettings; + private final ServerStreamingCallSettings listChangeStreamPartitionsSettings; + private EnhancedBigtableStubSettings(Builder builder) { super(builder); @@ -208,6 +227,7 @@ private EnhancedBigtableStubSettings(Builder builder) { bulkReadRowsSettings = builder.bulkReadRowsSettings.build(); checkAndMutateRowSettings = builder.checkAndMutateRowSettings.build(); readModifyWriteRowSettings = builder.readModifyWriteRowSettings.build(); + listChangeStreamPartitionsSettings = builder.listChangeStreamPartitionsSettings.build(); } /** Create a new builder. */ @@ -491,6 +511,10 @@ public UnaryCallSettings readModifyWriteRowSettings() { return readModifyWriteRowSettings; } + public ServerStreamingCallSettings listChangeStreamPartitionsSettings() { + return listChangeStreamPartitionsSettings; + } + /** Returns a builder containing all the values of this settings class. */ public Builder toBuilder() { return new Builder(this); @@ -516,6 +540,9 @@ public static class Builder extends StubSettings.Builder readModifyWriteRowSettings; + private final ServerStreamingCallSettings.Builder + listChangeStreamPartitionsSettings; + /** * Initializes a new Builder with sane defaults for all settings. 
* @@ -626,6 +653,12 @@ private Builder() { readModifyWriteRowSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); copyRetrySettings(baseDefaults.readModifyWriteRowSettings(), readModifyWriteRowSettings); + + listChangeStreamPartitionsSettings = ServerStreamingCallSettings.newBuilder(); + listChangeStreamPartitionsSettings + .setRetryableCodes(LIST_CHANGE_STREAM_PARTITIONS_RETRY_CODES) + .setRetrySettings(LIST_CHANGE_STREAM_PARTITIONS_RETRY_SETTINGS) + .setIdleTimeout(Duration.ofMinutes(5)); } private Builder(EnhancedBigtableStubSettings settings) { @@ -646,6 +679,7 @@ private Builder(EnhancedBigtableStubSettings settings) { bulkReadRowsSettings = settings.bulkReadRowsSettings.toBuilder(); checkAndMutateRowSettings = settings.checkAndMutateRowSettings.toBuilder(); readModifyWriteRowSettings = settings.readModifyWriteRowSettings.toBuilder(); + listChangeStreamPartitionsSettings = settings.listChangeStreamPartitionsSettings.toBuilder(); } // @@ -857,6 +891,7 @@ public String toString() { .add("bulkReadRowsSettings", bulkReadRowsSettings) .add("checkAndMutateRowSettings", checkAndMutateRowSettings) .add("readModifyWriteRowSettings", readModifyWriteRowSettings) + .add("listChangeStreamPartitionsSettings", listChangeStreamPartitionsSettings) .add("parent", super.toString()) .toString(); } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ListChangeStreamPartitionsUserCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ListChangeStreamPartitionsUserCallable.java new file mode 100644 index 0000000000..1d3393bb2b --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ListChangeStreamPartitionsUserCallable.java @@ -0,0 +1,93 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.changestream; + +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ResponseObserver; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamController; +import com.google.bigtable.v2.ListChangeStreamPartitionsRequest; +import com.google.bigtable.v2.ListChangeStreamPartitionsResponse; +import com.google.bigtable.v2.RowRange; +import com.google.cloud.bigtable.data.v2.internal.NameUtil; +import com.google.cloud.bigtable.data.v2.internal.RequestContext; + +/** Simple wrapper for ListChangeStreamPartitions to wrap the request and response protobufs. 
*/ +public class ListChangeStreamPartitionsUserCallable + extends ServerStreamingCallable { + private final RequestContext requestContext; + private final ServerStreamingCallable< + ListChangeStreamPartitionsRequest, ListChangeStreamPartitionsResponse> + inner; + + public ListChangeStreamPartitionsUserCallable( + ServerStreamingCallable + inner, + RequestContext requestContext) { + this.requestContext = requestContext; + this.inner = inner; + } + + @Override + public void call( + String tableId, ResponseObserver responseObserver, ApiCallContext context) { + String tableName = + NameUtil.formatTableName( + requestContext.getProjectId(), requestContext.getInstanceId(), tableId); + ListChangeStreamPartitionsRequest request = + ListChangeStreamPartitionsRequest.newBuilder() + .setTableName(tableName) + .setAppProfileId(requestContext.getAppProfileId()) + .build(); + + inner.call(request, new ConvertPartitionToRangeObserver(responseObserver), context); + } + + private class ConvertPartitionToRangeObserver + implements ResponseObserver { + + private final ResponseObserver outerObserver; + + ConvertPartitionToRangeObserver(ResponseObserver observer) { + this.outerObserver = observer; + } + + @Override + public void onStart(final StreamController controller) { + outerObserver.onStart(controller); + } + + @Override + public void onResponse(ListChangeStreamPartitionsResponse response) { + RowRange rowRange = + RowRange.newBuilder() + .setStartKeyClosed(response.getPartition().getRowRange().getStartKeyClosed()) + .setEndKeyOpen(response.getPartition().getRowRange().getEndKeyOpen()) + .build(); + outerObserver.onResponse(rowRange); + } + + @Override + public void onError(Throwable t) { + outerObserver.onError(t); + } + + @Override + public void onComplete() { + outerObserver.onComplete(); + } + } +} diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientTests.java 
b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientTests.java index 34c9a29d71..fcbcc15e30 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientTests.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientTests.java @@ -24,6 +24,7 @@ import com.google.api.gax.rpc.ResponseObserver; import com.google.api.gax.rpc.ServerStreamingCallable; import com.google.api.gax.rpc.UnaryCallable; +import com.google.bigtable.v2.RowRange; import com.google.cloud.bigtable.data.v2.models.BulkMutation; import com.google.cloud.bigtable.data.v2.models.ConditionalRowMutation; import com.google.cloud.bigtable.data.v2.models.Filters.Filter; @@ -79,6 +80,9 @@ public class BigtableDataClientTests { @Mock private Batcher mockBulkMutationBatcher; @Mock private Batcher mockBulkReadRowsBatcher; + @Mock(answer = Answers.RETURNS_DEEP_STUBS) + private ServerStreamingCallable mockListChangeStreamPartitionsCallable; + private BigtableDataClient bigtableDataClient; @Before @@ -153,6 +157,14 @@ public void proxyReadRowsCallableTest() { assertThat(bigtableDataClient.readRowsCallable()).isSameInstanceAs(mockReadRowsCallable); } + @Test + public void proxyListChangeStreamPartitionsCallableTest() { + Mockito.when(mockStub.listChangeStreamPartitionsCallable()) + .thenReturn(mockListChangeStreamPartitionsCallable); + assertThat(bigtableDataClient.listChangeStreamPartitionsCallable()) + .isSameInstanceAs(mockListChangeStreamPartitionsCallable); + } + @Test public void proxyReadRowAsyncTest() { Mockito.when(mockStub.readRowCallable()).thenReturn(mockReadRowCallable); @@ -300,6 +312,28 @@ public void proxyReadRowsAsyncTest() { Mockito.verify(mockReadRowsCallable).call(query, mockObserver); } + @Test + public void proxyListChangeStreamPartitionsSyncTest() { + Mockito.when(mockStub.listChangeStreamPartitionsCallable()) + .thenReturn(mockListChangeStreamPartitionsCallable); + + 
bigtableDataClient.listChangeStreamPartitions("fake-table"); + + Mockito.verify(mockListChangeStreamPartitionsCallable).call("fake-table"); + } + + @Test + public void proxyListChangeStreamPartitionsAsyncTest() { + Mockito.when(mockStub.listChangeStreamPartitionsCallable()) + .thenReturn(mockListChangeStreamPartitionsCallable); + + @SuppressWarnings("unchecked") + ResponseObserver mockObserver = Mockito.mock(ResponseObserver.class); + bigtableDataClient.listChangeStreamPartitionsAsync("fake-table", mockObserver); + + Mockito.verify(mockListChangeStreamPartitionsCallable).call("fake-table", mockObserver); + } + @Test public void proxySampleRowKeysCallableTest() { Mockito.when(mockStub.sampleRowKeysCallable()).thenReturn(mockSampleRowKeysCallable); diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/ConvertStreamExceptionCallableTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/ConvertStreamExceptionCallableTest.java new file mode 100644 index 0000000000..8f08e15b23 --- /dev/null +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/ConvertStreamExceptionCallableTest.java @@ -0,0 +1,76 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.stub; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.InternalException; +import com.google.api.gax.rpc.ResponseObserver; +import com.google.api.gax.rpc.ServerStreamingCallable; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ConvertStreamExceptionCallableTest { + + @Test + public void rstStreamExceptionConvertedToRetryableTest() { + ApiException originalException = + new InternalException( + new StatusRuntimeException( + Status.INTERNAL.withDescription( + "INTERNAL: HTTP/2 error code: INTERNAL_ERROR\nReceived Rst Stream")), + GrpcStatusCode.of(Status.Code.INTERNAL), + false); + assertFalse(originalException.isRetryable()); + SettableExceptionCallable settableExceptionCallable = + new SettableExceptionCallable<>(originalException); + ConvertStreamExceptionCallable convertStreamExceptionCallable = + new ConvertStreamExceptionCallable<>(settableExceptionCallable); + + Throwable actualError = null; + try { + convertStreamExceptionCallable.all().call("fake-request"); + } catch (Throwable t) { + actualError = t; + } + assert actualError instanceof InternalException; + InternalException actualException = (InternalException) actualError; + assertTrue(actualException.isRetryable()); + } + + private static final class SettableExceptionCallable + extends ServerStreamingCallable { + private final Throwable throwable; + + public SettableExceptionCallable(Throwable throwable) { + this.throwable = throwable; + } + + @Override + public void call( + RequestT request, ResponseObserver responseObserver, ApiCallContext context) { + responseObserver.onError(throwable); + } + } +} 
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java index c4e5ea2e40..32ab93d1f2 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java @@ -701,6 +701,7 @@ public void isRefreshingChannelFalseValueTest() { "bulkReadRowsSettings", "checkAndMutateRowSettings", "readModifyWriteRowSettings", + "listChangeStreamPartitionsSettings", }; @Test diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ListChangeStreamPartitionsUserCallableTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ListChangeStreamPartitionsUserCallableTest.java new file mode 100644 index 0000000000..03db35f8d6 --- /dev/null +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ListChangeStreamPartitionsUserCallableTest.java @@ -0,0 +1,87 @@ +/* + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.stub.changestream; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.bigtable.v2.ListChangeStreamPartitionsRequest; +import com.google.bigtable.v2.ListChangeStreamPartitionsResponse; +import com.google.bigtable.v2.RowRange; +import com.google.bigtable.v2.StreamPartition; +import com.google.cloud.bigtable.data.v2.internal.NameUtil; +import com.google.cloud.bigtable.data.v2.internal.RequestContext; +import com.google.cloud.bigtable.gaxx.testing.FakeStreamingApi; +import com.google.common.collect.Lists; +import com.google.common.truth.Truth; +import com.google.protobuf.ByteString; +import java.util.List; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ListChangeStreamPartitionsUserCallableTest { + private final RequestContext requestContext = + RequestContext.create("my-project", "my-instance", "my-profile"); + + @Test + public void requestIsCorrect() { + FakeStreamingApi.ServerStreamingStashCallable< + ListChangeStreamPartitionsRequest, ListChangeStreamPartitionsResponse> + inner = new FakeStreamingApi.ServerStreamingStashCallable<>(Lists.newArrayList()); + ListChangeStreamPartitionsUserCallable listChangeStreamPartitionsUserCallable = + new ListChangeStreamPartitionsUserCallable(inner, requestContext); + + listChangeStreamPartitionsUserCallable.all().call("my-table"); + assertThat(inner.getActualRequest()) + .isEqualTo( + ListChangeStreamPartitionsRequest.newBuilder() + .setTableName( + NameUtil.formatTableName( + requestContext.getProjectId(), requestContext.getInstanceId(), "my-table")) + .setAppProfileId(requestContext.getAppProfileId()) + .build()); + } + + @Test + public void responseIsConverted() { + FakeStreamingApi.ServerStreamingStashCallable< + ListChangeStreamPartitionsRequest, ListChangeStreamPartitionsResponse> + inner = + new FakeStreamingApi.ServerStreamingStashCallable<>( + 
Lists.newArrayList( + ListChangeStreamPartitionsResponse.newBuilder() + .setPartition( + StreamPartition.newBuilder() + .setRowRange( + RowRange.newBuilder() + .setStartKeyClosed(ByteString.copyFromUtf8("apple")) + .setEndKeyOpen(ByteString.copyFromUtf8("banana")) + .build()) + .build()) + .build())); + ListChangeStreamPartitionsUserCallable listChangeStreamPartitionsUserCallable = + new ListChangeStreamPartitionsUserCallable(inner, requestContext); + + List results = listChangeStreamPartitionsUserCallable.all().call("my-table"); + Truth.assertThat(results) + .containsExactly( + RowRange.newBuilder() + .setStartKeyClosed(ByteString.copyFromUtf8("apple")) + .setEndKeyOpen(ByteString.copyFromUtf8("banana")) + .build()); + } +} From 39a7b58337c9bcd29da92218422be59a1f74163a Mon Sep 17 00:00:00 2001 From: tengzhonger <109308630+tengzhonger@users.noreply.github.com> Date: Tue, 26 Jul 2022 13:06:57 -0400 Subject: [PATCH 03/13] feat: Create ReadChangeStreamQuery and ChangeStreamRecode::Heartbeat/CloseStream (#1318) * feat: Add ReadChangeStreamQuery and ChangeStreamRecord::Heartbeat/CloseStream 1. ReadChangeStreamQuery will be used by readChangeStream(TODO) 2. 
ChangeStreamRecord is one of: Heartbeat, CloseStream, or a ChangeStreamMutation(TODO) * fix: Address comments about styles * fix: Remove `InternalApi` tag for package private methods in veneer client Co-authored-by: Teng Zhong --- .../models/ChangeStreamContinuationToken.java | 99 +++++ .../data/v2/models/ChangeStreamRecord.java | 25 ++ .../bigtable/data/v2/models/CloseStream.java | 102 +++++ .../bigtable/data/v2/models/Heartbeat.java | 79 ++++ .../data/v2/models/ReadChangeStreamQuery.java | 268 +++++++++++++ .../v2/models/ChangeStreamRecordTest.java | 166 ++++++++ .../v2/models/ReadChangeStreamQueryTest.java | 368 ++++++++++++++++++ 7 files changed, 1107 insertions(+) create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationToken.java create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecord.java create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/CloseStream.java create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Heartbeat.java create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ReadChangeStreamQuery.java create mode 100644 google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecordTest.java create mode 100644 google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ReadChangeStreamQueryTest.java diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationToken.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationToken.java new file mode 100644 index 0000000000..f499a94e45 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationToken.java @@ -0,0 +1,99 @@ +/* + * Copyright 2022 Google LLC 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.models; + +import com.google.bigtable.v2.RowRange; +import com.google.bigtable.v2.StreamContinuationToken; +import com.google.common.base.MoreObjects; +import com.google.common.base.Objects; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.Serializable; +import javax.annotation.Nonnull; + +/** A simple wrapper for {@link StreamContinuationToken}. */ +public final class ChangeStreamContinuationToken implements Serializable { + private static final long serialVersionUID = 524679926247095L; + + private transient StreamContinuationToken.Builder builder; + + private ChangeStreamContinuationToken(@Nonnull StreamContinuationToken.Builder builder) { + this.builder = builder; + } + + private void readObject(ObjectInputStream input) throws IOException, ClassNotFoundException { + input.defaultReadObject(); + builder = StreamContinuationToken.newBuilder().mergeFrom(input); + } + + private void writeObject(ObjectOutputStream output) throws IOException { + output.defaultWriteObject(); + builder.build().writeTo(output); + } + + public RowRange getRowRange() { + return this.builder.getPartition().getRowRange(); + } + + public String getToken() { + return this.builder.getToken(); + } + + /** + * Creates the protobuf. 
This method is considered an internal implementation detail and not meant + * to be used by applications. + */ + StreamContinuationToken toProto() { + return builder.build(); + } + + /** Wraps the protobuf {@link StreamContinuationToken}. */ + static ChangeStreamContinuationToken fromProto( + @Nonnull StreamContinuationToken streamContinuationToken) { + return new ChangeStreamContinuationToken(streamContinuationToken.toBuilder()); + } + + public ChangeStreamContinuationToken clone() { + return new ChangeStreamContinuationToken(this.builder.clone()); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ChangeStreamContinuationToken otherToken = (ChangeStreamContinuationToken) o; + return Objects.equal(getRowRange(), otherToken.getRowRange()) + && Objects.equal(getToken(), otherToken.getToken()); + } + + @Override + public int hashCode() { + return Objects.hashCode(getRowRange(), getToken()); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("rowRange", getRowRange()) + .add("token", getToken()) + .toString(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecord.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecord.java new file mode 100644 index 0000000000..0bf5e0c31e --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecord.java @@ -0,0 +1,25 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.models; + +import com.google.api.core.InternalExtensionOnly; + +/** + * Default representation of a change stream record, which can be a Heartbeat, a CloseStream, or a + * logical mutation. + */ +@InternalExtensionOnly +public interface ChangeStreamRecord {} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/CloseStream.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/CloseStream.java new file mode 100644 index 0000000000..403705f676 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/CloseStream.java @@ -0,0 +1,102 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.models; + +import com.google.api.core.InternalApi; +import com.google.bigtable.v2.ReadChangeStreamResponse; +import com.google.bigtable.v2.StreamContinuationToken; +import com.google.common.base.MoreObjects; +import com.google.common.base.Objects; +import com.google.common.collect.ImmutableList; +import com.google.rpc.Status; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.Serializable; +import java.util.List; +import javax.annotation.Nonnull; + +public final class CloseStream implements ChangeStreamRecord, Serializable { + private static final long serialVersionUID = 7316215828353608505L; + private final Status status; + private transient ImmutableList.Builder + changeStreamContinuationTokens = ImmutableList.builder(); + + private CloseStream(Status status, List continuationTokens) { + this.status = status; + for (StreamContinuationToken streamContinuationToken : continuationTokens) { + changeStreamContinuationTokens.add( + ChangeStreamContinuationToken.fromProto(streamContinuationToken)); + } + } + + @InternalApi("Used in Changestream beam pipeline.") + public Status getStatus() { + return this.status; + } + + @InternalApi("Used in Changestream beam pipeline.") + public List getChangeStreamContinuationTokens() { + return changeStreamContinuationTokens.build(); + } + + private void readObject(ObjectInputStream input) throws IOException, ClassNotFoundException { + input.defaultReadObject(); + + @SuppressWarnings("unchecked") + ImmutableList deserialized = + (ImmutableList) input.readObject(); + this.changeStreamContinuationTokens = + ImmutableList.builder().addAll(deserialized); + } + + private void writeObject(ObjectOutputStream output) throws IOException { + output.defaultWriteObject(); + output.writeObject(changeStreamContinuationTokens.build()); + } + + /** Wraps the protobuf {@link ReadChangeStreamResponse.CloseStream}. 
*/ + static CloseStream fromProto(@Nonnull ReadChangeStreamResponse.CloseStream closeStream) { + return new CloseStream(closeStream.getStatus(), closeStream.getContinuationTokensList()); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + CloseStream record = (CloseStream) o; + return Objects.equal(status, record.getStatus()) + && Objects.equal( + changeStreamContinuationTokens.build(), record.getChangeStreamContinuationTokens()); + } + + @Override + public int hashCode() { + return Objects.hashCode(status, changeStreamContinuationTokens); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("status", status) + .add("changeStreamContinuationTokens", changeStreamContinuationTokens) + .toString(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Heartbeat.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Heartbeat.java new file mode 100644 index 0000000000..73876f887b --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Heartbeat.java @@ -0,0 +1,79 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.models; + +import com.google.api.core.InternalApi; +import com.google.bigtable.v2.ReadChangeStreamResponse; +import com.google.common.base.MoreObjects; +import com.google.common.base.Objects; +import com.google.protobuf.Timestamp; +import java.io.Serializable; +import javax.annotation.Nonnull; + +public final class Heartbeat implements ChangeStreamRecord, Serializable { + private static final long serialVersionUID = 7316215828353608504L; + private final Timestamp lowWatermark; + private final ChangeStreamContinuationToken changeStreamContinuationToken; + + private Heartbeat( + Timestamp lowWatermark, ChangeStreamContinuationToken changeStreamContinuationToken) { + this.lowWatermark = lowWatermark; + this.changeStreamContinuationToken = changeStreamContinuationToken; + } + + @InternalApi("Used in Changestream beam pipeline.") + public ChangeStreamContinuationToken getChangeStreamContinuationToken() { + return changeStreamContinuationToken; + } + + @InternalApi("Used in Changestream beam pipeline.") + public Timestamp getLowWatermark() { + return lowWatermark; + } + + /** Wraps the protobuf {@link ReadChangeStreamResponse.Heartbeat}. 
*/ + static Heartbeat fromProto(@Nonnull ReadChangeStreamResponse.Heartbeat heartbeat) { + return new Heartbeat( + heartbeat.getLowWatermark(), + ChangeStreamContinuationToken.fromProto(heartbeat.getContinuationToken())); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Heartbeat record = (Heartbeat) o; + return Objects.equal(lowWatermark, record.getLowWatermark()) + && Objects.equal(changeStreamContinuationToken, record.getChangeStreamContinuationToken()); + } + + @Override + public int hashCode() { + return Objects.hashCode(lowWatermark, changeStreamContinuationToken); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("lowWatermark", lowWatermark) + .add("changeStreamContinuationToken", changeStreamContinuationToken) + .toString(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ReadChangeStreamQuery.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ReadChangeStreamQuery.java new file mode 100644 index 0000000000..5ac3a743f6 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ReadChangeStreamQuery.java @@ -0,0 +1,268 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.models; + +import com.google.api.core.InternalApi; +import com.google.bigtable.v2.ReadChangeStreamRequest; +import com.google.bigtable.v2.RowRange; +import com.google.bigtable.v2.StreamContinuationTokens; +import com.google.bigtable.v2.StreamPartition; +import com.google.cloud.bigtable.data.v2.internal.NameUtil; +import com.google.cloud.bigtable.data.v2.internal.RequestContext; +import com.google.cloud.bigtable.data.v2.models.Range.ByteStringRange; +import com.google.common.base.MoreObjects; +import com.google.common.base.Objects; +import com.google.common.base.Preconditions; +import com.google.protobuf.ByteString; +import com.google.protobuf.Duration; +import com.google.protobuf.Timestamp; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.Serializable; +import java.util.List; +import javax.annotation.Nonnull; + +/** A simple wrapper to construct a query for the ReadChangeStream RPC. */ +public final class ReadChangeStreamQuery implements Serializable { + private static final long serialVersionUID = 948588515749969176L; + + private final String tableId; + private transient ReadChangeStreamRequest.Builder builder = ReadChangeStreamRequest.newBuilder(); + + /** + * Constructs a new ReadChangeStreamQuery object for the specified table id. The table id will be + * combined with the instance name specified in the {@link + * com.google.cloud.bigtable.data.v2.BigtableDataSettings}. 
+ */ + public static ReadChangeStreamQuery create(String tableId) { + return new ReadChangeStreamQuery(tableId); + } + + private ReadChangeStreamQuery(String tableId) { + this.tableId = tableId; + } + + private void readObject(ObjectInputStream input) throws IOException, ClassNotFoundException { + input.defaultReadObject(); + builder = ReadChangeStreamRequest.newBuilder().mergeFrom(input); + } + + private void writeObject(ObjectOutputStream output) throws IOException { + output.defaultWriteObject(); + builder.build().writeTo(output); + } + + /** + * Adds a partition. + * + * @param rowRange Represents the partition in the form [startKey, endKey). startKey can be null + * to represent negative infinity. endKey can be null to represent positive infinity. + */ + public ReadChangeStreamQuery streamPartition(@Nonnull RowRange rowRange) { + builder.setPartition(StreamPartition.newBuilder().setRowRange(rowRange).build()); + return this; + } + + /** + * Adds a partition. + * + * @param start The beginning of the range (inclusive). Can be null to represent negative + * infinity. + * @param end The end of the range (exclusive). Can be null to represent positive infinity. + */ + public ReadChangeStreamQuery streamPartition(String start, String end) { + return streamPartition(wrapKey(start), wrapKey(end)); + } + + /** + * Adds a partition. + * + * @param start The beginning of the range (inclusive). Can be null to represent negative + * infinity. + * @param end The end of the range (exclusive). Can be null to represent positive infinity. + */ + public ReadChangeStreamQuery streamPartition(ByteString start, ByteString end) { + RowRange.Builder rangeBuilder = RowRange.newBuilder(); + if (start != null) { + rangeBuilder.setStartKeyClosed(start); + } + if (end != null) { + rangeBuilder.setEndKeyOpen(end); + } + return streamPartition(rangeBuilder.build()); + } + + /** Adds a partition. 
*/ + public ReadChangeStreamQuery streamPartition(ByteStringRange range) { + RowRange.Builder rangeBuilder = RowRange.newBuilder(); + + switch (range.getStartBound()) { + case OPEN: + throw new IllegalStateException("Start bound should be closed."); + case CLOSED: + rangeBuilder.setStartKeyClosed(range.getStart()); + break; + case UNBOUNDED: + rangeBuilder.clearStartKey(); + break; + default: + throw new IllegalStateException("Unknown start bound: " + range.getStartBound()); + } + + switch (range.getEndBound()) { + case OPEN: + rangeBuilder.setEndKeyOpen(range.getEnd()); + break; + case CLOSED: + throw new IllegalStateException("End bound should be open."); + case UNBOUNDED: + rangeBuilder.clearEndKey(); + break; + default: + throw new IllegalStateException("Unknown end bound: " + range.getEndBound()); + } + + return streamPartition(rangeBuilder.build()); + } + + /** Sets the startTime to read the change stream. */ + public ReadChangeStreamQuery startTime(Timestamp value) { + Preconditions.checkArgument( + !builder.hasContinuationTokens(), + "startTime and continuationTokens can't be specified together"); + builder.setStartTime(value); + return this; + } + + /** Sets the endTime to read the change stream. */ + public ReadChangeStreamQuery endTime(Timestamp value) { + builder.setEndTime(value); + return this; + } + + /** Sets the stream continuation tokens to read the change stream. 
*/ + public ReadChangeStreamQuery continuationTokens( + List changeStreamContinuationTokens) { + Preconditions.checkArgument( + !builder.hasStartTime(), "startTime and continuationTokens can't be specified together"); + StreamContinuationTokens.Builder streamContinuationTokensBuilder = + StreamContinuationTokens.newBuilder(); + for (ChangeStreamContinuationToken changeStreamContinuationToken : + changeStreamContinuationTokens) { + builder.setContinuationTokens( + streamContinuationTokensBuilder.addTokens(changeStreamContinuationToken.toProto())); + } + builder.setContinuationTokens(streamContinuationTokensBuilder.build()); + return this; + } + + /** Sets the heartbeat duration for the change stream. */ + public ReadChangeStreamQuery heartbeatDuration(long seconds) { + return heartbeatDuration(seconds, 0); + } + + /** Sets the heartbeat duration for the change stream. */ + public ReadChangeStreamQuery heartbeatDuration(long seconds, int nanos) { + builder.setHeartbeatDuration(Duration.newBuilder().setSeconds(seconds).setNanos(nanos).build()); + return this; + } + + /** + * Creates the request protobuf. This method is considered an internal implementation detail and + * not meant to be used by applications. + */ + @InternalApi("Used in Changestream beam pipeline.") + public ReadChangeStreamRequest toProto(RequestContext requestContext) { + String tableName = + NameUtil.formatTableName( + requestContext.getProjectId(), requestContext.getInstanceId(), tableId); + + return builder + .setTableName(tableName) + .setAppProfileId(requestContext.getAppProfileId()) + .build(); + } + + /** + * Wraps the protobuf {@link ReadChangeStreamRequest}. + * + *

WARNING: Please note that the project id & instance id in the table name will be overwritten + * by the configuration in the BigtableDataClient. + */ + public static ReadChangeStreamQuery fromProto(@Nonnull ReadChangeStreamRequest request) { + ReadChangeStreamQuery query = + new ReadChangeStreamQuery(NameUtil.extractTableIdFromTableName(request.getTableName())); + query.builder = request.toBuilder(); + + return query; + } + + public ReadChangeStreamQuery clone() { + ReadChangeStreamQuery query = ReadChangeStreamQuery.create(tableId); + query.builder = this.builder.clone(); + return query; + } + + private static ByteString wrapKey(String key) { + if (key == null) { + return null; + } + return ByteString.copyFromUtf8(key); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ReadChangeStreamQuery query = (ReadChangeStreamQuery) o; + return Objects.equal(tableId, query.tableId) + && Objects.equal(builder.getPartition(), query.builder.getPartition()) + && Objects.equal(builder.getStartTime(), query.builder.getStartTime()) + && Objects.equal(builder.getEndTime(), query.builder.getEndTime()) + && Objects.equal(builder.getContinuationTokens(), query.builder.getContinuationTokens()) + && Objects.equal(builder.getHeartbeatDuration(), query.builder.getHeartbeatDuration()); + } + + @Override + public int hashCode() { + return Objects.hashCode( + tableId, + builder.getPartition(), + builder.getStartTime(), + builder.getEndTime(), + builder.getContinuationTokens(), + builder.getHeartbeatDuration()); + } + + @Override + public String toString() { + ReadChangeStreamRequest request = builder.build(); + + return MoreObjects.toStringHelper(this) + .add("tableId", tableId) + .add("partition", request.getPartition()) + .add("startTime", request.getStartTime()) + .add("endTime", request.getEndTime()) + .add("continuationTokens", request.getContinuationTokens()) + 
.add("heartbeatDuration", request.getHeartbeatDuration()) + .toString(); + } +} diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecordTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecordTest.java new file mode 100644 index 0000000000..c82aae7330 --- /dev/null +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecordTest.java @@ -0,0 +1,166 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.models; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.bigtable.v2.ReadChangeStreamResponse; +import com.google.bigtable.v2.RowRange; +import com.google.bigtable.v2.StreamContinuationToken; +import com.google.bigtable.v2.StreamPartition; +import com.google.protobuf.ByteString; +import com.google.protobuf.Timestamp; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ChangeStreamRecordTest { + + @Test + public void heartbeatSerializationTest() throws IOException, ClassNotFoundException { + ReadChangeStreamResponse.Heartbeat heartbeatProto = + ReadChangeStreamResponse.Heartbeat.newBuilder() + .setLowWatermark(com.google.protobuf.Timestamp.newBuilder().setSeconds(1000).build()) + .setContinuationToken( + StreamContinuationToken.newBuilder().setToken("random-token").build()) + .build(); + Heartbeat heartbeat = Heartbeat.fromProto(heartbeatProto); + + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + ObjectOutputStream oos = new ObjectOutputStream(bos); + oos.writeObject(heartbeat); + oos.close(); + ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray())); + Heartbeat actual = (Heartbeat) ois.readObject(); + assertThat(actual).isEqualTo(heartbeat); + } + + @Test + public void closeStreamSerializationTest() throws IOException, ClassNotFoundException { + com.google.rpc.Status status = com.google.rpc.Status.newBuilder().setCode(0).build(); + RowRange rowRange1 = + RowRange.newBuilder() + .setStartKeyClosed(ByteString.copyFromUtf8("")) + .setEndKeyOpen(ByteString.copyFromUtf8("apple")) + .build(); + String token1 = "close-stream-token-1"; + RowRange rowRange2 = + 
RowRange.newBuilder() + .setStartKeyClosed(ByteString.copyFromUtf8("apple")) + .setEndKeyOpen(ByteString.copyFromUtf8("")) + .build(); + String token2 = "close-stream-token-2"; + ReadChangeStreamResponse.CloseStream closeStreamProto = + ReadChangeStreamResponse.CloseStream.newBuilder() + .addContinuationTokens( + StreamContinuationToken.newBuilder() + .setPartition(StreamPartition.newBuilder().setRowRange(rowRange1).build()) + .setToken(token1) + .build()) + .addContinuationTokens( + StreamContinuationToken.newBuilder() + .setPartition(StreamPartition.newBuilder().setRowRange(rowRange2).build()) + .setToken(token2) + .build()) + .setStatus(status) + .build(); + CloseStream closeStream = CloseStream.fromProto(closeStreamProto); + + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + ObjectOutputStream oos = new ObjectOutputStream(bos); + oos.writeObject(closeStream); + oos.close(); + ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray())); + CloseStream actual = (CloseStream) ois.readObject(); + assertThat(actual.getChangeStreamContinuationTokens()) + .isEqualTo(closeStream.getChangeStreamContinuationTokens()); + assertThat(actual.getStatus()).isEqualTo(closeStream.getStatus()); + } + + @Test + public void heartbeatTest() { + Timestamp lowWatermark = Timestamp.newBuilder().setSeconds(1000).build(); + RowRange rowRange = + RowRange.newBuilder() + .setStartKeyClosed(ByteString.copyFromUtf8("apple")) + .setEndKeyOpen(ByteString.copyFromUtf8("banana")) + .build(); + String token = "heartbeat-token"; + ReadChangeStreamResponse.Heartbeat heartbeatProto = + ReadChangeStreamResponse.Heartbeat.newBuilder() + .setLowWatermark(lowWatermark) + .setContinuationToken( + StreamContinuationToken.newBuilder() + .setPartition(StreamPartition.newBuilder().setRowRange(rowRange).build()) + .setToken(token) + .build()) + .build(); + Heartbeat actualHeartbeat = Heartbeat.fromProto(heartbeatProto); + + 
Assert.assertEquals(actualHeartbeat.getLowWatermark(), lowWatermark); + Assert.assertEquals(actualHeartbeat.getChangeStreamContinuationToken().getRowRange(), rowRange); + Assert.assertEquals(actualHeartbeat.getChangeStreamContinuationToken().getToken(), token); + } + + @Test + public void closeStreamTest() { + com.google.rpc.Status status = com.google.rpc.Status.newBuilder().setCode(0).build(); + RowRange rowRange1 = + RowRange.newBuilder() + .setStartKeyClosed(ByteString.copyFromUtf8("")) + .setEndKeyOpen(ByteString.copyFromUtf8("apple")) + .build(); + String token1 = "close-stream-token-1"; + RowRange rowRange2 = + RowRange.newBuilder() + .setStartKeyClosed(ByteString.copyFromUtf8("apple")) + .setEndKeyOpen(ByteString.copyFromUtf8("")) + .build(); + String token2 = "close-stream-token-2"; + ReadChangeStreamResponse.CloseStream closeStreamProto = + ReadChangeStreamResponse.CloseStream.newBuilder() + .addContinuationTokens( + StreamContinuationToken.newBuilder() + .setPartition(StreamPartition.newBuilder().setRowRange(rowRange1).build()) + .setToken(token1) + .build()) + .addContinuationTokens( + StreamContinuationToken.newBuilder() + .setPartition(StreamPartition.newBuilder().setRowRange(rowRange2).build()) + .setToken(token2) + .build()) + .setStatus(status) + .build(); + CloseStream actualCloseStream = CloseStream.fromProto(closeStreamProto); + + Assert.assertEquals(status, actualCloseStream.getStatus()); + Assert.assertEquals( + rowRange1, actualCloseStream.getChangeStreamContinuationTokens().get(0).getRowRange()); + Assert.assertEquals( + token1, actualCloseStream.getChangeStreamContinuationTokens().get(0).getToken()); + Assert.assertEquals( + rowRange2, actualCloseStream.getChangeStreamContinuationTokens().get(1).getRowRange()); + Assert.assertEquals( + token2, actualCloseStream.getChangeStreamContinuationTokens().get(1).getToken()); + } +} diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ReadChangeStreamQueryTest.java 
b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ReadChangeStreamQueryTest.java new file mode 100644 index 0000000000..cae2d93926 --- /dev/null +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ReadChangeStreamQueryTest.java @@ -0,0 +1,368 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.models; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.bigtable.v2.ReadChangeStreamRequest; +import com.google.bigtable.v2.ReadChangeStreamRequest.Builder; +import com.google.bigtable.v2.RowRange; +import com.google.bigtable.v2.StreamContinuationToken; +import com.google.bigtable.v2.StreamContinuationTokens; +import com.google.bigtable.v2.StreamPartition; +import com.google.cloud.bigtable.data.v2.internal.NameUtil; +import com.google.cloud.bigtable.data.v2.internal.RequestContext; +import com.google.cloud.bigtable.data.v2.models.Range.ByteStringRange; +import com.google.protobuf.ByteString; +import com.google.protobuf.Duration; +import com.google.protobuf.Timestamp; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.util.Collections; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import 
org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ReadChangeStreamQueryTest { + private static final String PROJECT_ID = "fake-project"; + private static final String INSTANCE_ID = "fake-instance"; + private static final String TABLE_ID = "fake-table"; + private static final String APP_PROFILE_ID = "fake-profile-id"; + private RequestContext requestContext; + + @Rule public ExpectedException expect = ExpectedException.none(); + + @Before + public void setUp() { + requestContext = RequestContext.create(PROJECT_ID, INSTANCE_ID, APP_PROFILE_ID); + } + + @Test + public void requestContextTest() { + ReadChangeStreamQuery query = ReadChangeStreamQuery.create(TABLE_ID); + + ReadChangeStreamRequest proto = query.toProto(requestContext); + assertThat(proto).isEqualTo(expectedProtoBuilder().build()); + } + + @Test + public void streamPartitionTest() { + // Case 1: String. + ReadChangeStreamQuery query1 = + ReadChangeStreamQuery.create(TABLE_ID).streamPartition("simple-begin", "simple-end"); + ReadChangeStreamRequest actualProto1 = query1.toProto(requestContext); + Builder expectedProto1 = expectedProtoBuilder(); + expectedProto1.setPartition( + StreamPartition.newBuilder() + .setRowRange( + RowRange.newBuilder() + .setStartKeyClosed(ByteString.copyFromUtf8("simple-begin")) + .setEndKeyOpen(ByteString.copyFromUtf8("simple-end")) + .build()) + .build()); + assertThat(actualProto1).isEqualTo(expectedProto1.build()); + + // Case 2: ByteString. 
+ ReadChangeStreamQuery query2 = + ReadChangeStreamQuery.create(TABLE_ID) + .streamPartition( + ByteString.copyFromUtf8("byte-begin"), ByteString.copyFromUtf8("byte-end")); + ReadChangeStreamRequest actualProto2 = query2.toProto(requestContext); + Builder expectedProto2 = expectedProtoBuilder(); + expectedProto2.setPartition( + StreamPartition.newBuilder() + .setRowRange( + RowRange.newBuilder() + .setStartKeyClosed(ByteString.copyFromUtf8("byte-begin")) + .setEndKeyOpen(ByteString.copyFromUtf8("byte-end")) + .build()) + .build()); + assertThat(actualProto2).isEqualTo(expectedProto2.build()); + + // Case 3: ByteStringRange. + ReadChangeStreamQuery query3 = + ReadChangeStreamQuery.create(TABLE_ID) + .streamPartition(ByteStringRange.create("range-begin", "range-end")); + ReadChangeStreamRequest actualProto3 = query3.toProto(requestContext); + Builder expectedProto3 = expectedProtoBuilder(); + expectedProto3.setPartition( + StreamPartition.newBuilder() + .setRowRange( + RowRange.newBuilder() + .setStartKeyClosed(ByteString.copyFromUtf8("range-begin")) + .setEndKeyOpen(ByteString.copyFromUtf8("range-end")) + .build()) + .build()); + assertThat(actualProto3).isEqualTo(expectedProto3.build()); + } + + @Test + public void startTimeTest() { + ReadChangeStreamQuery query = + ReadChangeStreamQuery.create(TABLE_ID) + .startTime(com.google.protobuf.Timestamp.newBuilder().setSeconds(1000).build()); + + Builder expectedProto = + expectedProtoBuilder() + .setStartTime(com.google.protobuf.Timestamp.newBuilder().setSeconds(1000).build()); + + ReadChangeStreamRequest actualProto = query.toProto(requestContext); + assertThat(actualProto).isEqualTo(expectedProto.build()); + } + + @Test + public void endTimeTest() { + ReadChangeStreamQuery query = + ReadChangeStreamQuery.create(TABLE_ID) + .endTime(com.google.protobuf.Timestamp.newBuilder().setSeconds(1000).build()); + + Builder expectedProto = + expectedProtoBuilder() + 
.setEndTime(com.google.protobuf.Timestamp.newBuilder().setSeconds(1000).build()); + + ReadChangeStreamRequest actualProto = query.toProto(requestContext); + assertThat(actualProto).isEqualTo(expectedProto.build()); + } + + @Test + public void heartbeatDurationTest() { + ReadChangeStreamQuery query = ReadChangeStreamQuery.create(TABLE_ID).heartbeatDuration(5); + + Builder expectedProto = + expectedProtoBuilder() + .setHeartbeatDuration(com.google.protobuf.Duration.newBuilder().setSeconds(5).build()); + + ReadChangeStreamRequest actualProto = query.toProto(requestContext); + assertThat(actualProto).isEqualTo(expectedProto.build()); + } + + @Test + public void continuationTokensTest() { + StreamContinuationToken tokenProto = + StreamContinuationToken.newBuilder() + .setPartition( + StreamPartition.newBuilder() + .setRowRange( + RowRange.newBuilder() + .setStartKeyClosed(ByteString.copyFromUtf8("start")) + .setEndKeyOpen(ByteString.copyFromUtf8("end")) + .build()) + .build()) + .setToken("random-token") + .build(); + ChangeStreamContinuationToken token = ChangeStreamContinuationToken.fromProto(tokenProto); + ReadChangeStreamQuery query = + ReadChangeStreamQuery.create(TABLE_ID).continuationTokens(Collections.singletonList(token)); + + Builder expectedProto = + expectedProtoBuilder() + .setContinuationTokens( + StreamContinuationTokens.newBuilder().addTokens(tokenProto).build()); + + ReadChangeStreamRequest actualProto = query.toProto(requestContext); + assertThat(actualProto).isEqualTo(expectedProto.build()); + } + + @Test(expected = IllegalArgumentException.class) + public void createWithStartTimeAndContinuationTokensTest() { + StreamContinuationToken tokenProto = + StreamContinuationToken.newBuilder() + .setPartition( + StreamPartition.newBuilder() + .setRowRange( + RowRange.newBuilder() + .setStartKeyClosed(ByteString.copyFromUtf8("start")) + .setEndKeyOpen(ByteString.copyFromUtf8("end")) + .build()) + .build()) + .setToken("random-token") + .build(); + 
ChangeStreamContinuationToken token = ChangeStreamContinuationToken.fromProto(tokenProto); + ReadChangeStreamQuery query = + ReadChangeStreamQuery.create(TABLE_ID) + .startTime(Timestamp.newBuilder().setSeconds(5).build()) + .continuationTokens(Collections.singletonList(token)); + expect.expect(IllegalArgumentException.class); + expect.expectMessage("startTime and continuationTokens can't be specified together"); + } + + @Test + public void serializationTest() throws IOException, ClassNotFoundException { + StreamContinuationToken tokenProto = + StreamContinuationToken.newBuilder() + .setPartition( + StreamPartition.newBuilder() + .setRowRange( + RowRange.newBuilder() + .setStartKeyClosed(ByteString.copyFromUtf8("start")) + .setEndKeyOpen(ByteString.copyFromUtf8("end")) + .build()) + .build()) + .setToken("random-token") + .build(); + ChangeStreamContinuationToken token = ChangeStreamContinuationToken.fromProto(tokenProto); + ReadChangeStreamQuery expected = + ReadChangeStreamQuery.create(TABLE_ID) + .streamPartition("simple-begin", "simple-end") + .continuationTokens(Collections.singletonList(token)) + .endTime(com.google.protobuf.Timestamp.newBuilder().setSeconds(2000).build()) + .heartbeatDuration(5); + + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + ObjectOutputStream oos = new ObjectOutputStream(bos); + oos.writeObject(expected); + oos.close(); + + ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray())); + + ReadChangeStreamQuery actual = (ReadChangeStreamQuery) ois.readObject(); + assertThat(actual.toProto(requestContext)).isEqualTo(expected.toProto(requestContext)); + } + + private static ReadChangeStreamRequest.Builder expectedProtoBuilder() { + return ReadChangeStreamRequest.newBuilder() + .setTableName(NameUtil.formatTableName(PROJECT_ID, INSTANCE_ID, TABLE_ID)) + .setAppProfileId(APP_PROFILE_ID); + } + + @Test + public void testFromProto() { + StreamContinuationToken token = + 
StreamContinuationToken.newBuilder() + .setPartition( + StreamPartition.newBuilder() + .setRowRange( + RowRange.newBuilder() + .setStartKeyClosed(ByteString.copyFromUtf8("")) + .setEndKeyOpen(ByteString.copyFromUtf8("")) + .build()) + .build()) + .setToken("random-token") + .build(); + ReadChangeStreamRequest request = + ReadChangeStreamRequest.newBuilder() + .setTableName(NameUtil.formatTableName(PROJECT_ID, INSTANCE_ID, TABLE_ID)) + .setAppProfileId(APP_PROFILE_ID) + .setPartition( + StreamPartition.newBuilder() + .setRowRange( + RowRange.newBuilder() + .setStartKeyClosed(ByteString.copyFromUtf8("")) + .setEndKeyClosed(ByteString.copyFromUtf8("")) + .build())) + .setContinuationTokens(StreamContinuationTokens.newBuilder().addTokens(token).build()) + .setEndTime(Timestamp.newBuilder().setSeconds(2000).build()) + .setHeartbeatDuration(Duration.newBuilder().setSeconds(5).build()) + .build(); + ReadChangeStreamQuery query = ReadChangeStreamQuery.fromProto(request); + assertThat(query.toProto(requestContext)).isEqualTo(request); + } + + @Test(expected = IllegalArgumentException.class) + public void testFromProtoWithEmptyTableId() { + ReadChangeStreamQuery.fromProto(ReadChangeStreamRequest.getDefaultInstance()); + + expect.expect(IllegalArgumentException.class); + expect.expectMessage("Invalid table name:"); + } + + @Test + public void testEquality() { + ReadChangeStreamQuery request = + ReadChangeStreamQuery.create(TABLE_ID) + .streamPartition("simple-begin", "simple-end") + .startTime(com.google.protobuf.Timestamp.newBuilder().setSeconds(1000).build()) + .endTime(com.google.protobuf.Timestamp.newBuilder().setSeconds(2000).build()) + .heartbeatDuration(5); + + // ReadChangeStreamQuery#toProto should not change the ReadChangeStreamQuery instance state + request.toProto(requestContext); + assertThat(request) + .isEqualTo( + ReadChangeStreamQuery.create(TABLE_ID) + .streamPartition("simple-begin", "simple-end") + 
.startTime(com.google.protobuf.Timestamp.newBuilder().setSeconds(1000).build()) + .endTime(com.google.protobuf.Timestamp.newBuilder().setSeconds(2000).build()) + .heartbeatDuration(5)); + + assertThat(ReadChangeStreamQuery.create(TABLE_ID).streamPartition("begin-1", "end-1")) + .isNotEqualTo(ReadChangeStreamQuery.create(TABLE_ID).streamPartition("begin-2", "end-1")); + assertThat( + ReadChangeStreamQuery.create(TABLE_ID) + .startTime(com.google.protobuf.Timestamp.newBuilder().setSeconds(1000).build())) + .isNotEqualTo( + ReadChangeStreamQuery.create(TABLE_ID) + .startTime(com.google.protobuf.Timestamp.newBuilder().setSeconds(1001).build())); + assertThat( + ReadChangeStreamQuery.create(TABLE_ID) + .endTime(com.google.protobuf.Timestamp.newBuilder().setSeconds(1000).build())) + .isNotEqualTo( + ReadChangeStreamQuery.create(TABLE_ID) + .endTime(com.google.protobuf.Timestamp.newBuilder().setSeconds(1001).build())); + assertThat(ReadChangeStreamQuery.create(TABLE_ID).heartbeatDuration(5)) + .isNotEqualTo(ReadChangeStreamQuery.create(TABLE_ID).heartbeatDuration(6)); + } + + @Test + public void testClone() { + StreamContinuationToken tokenProto = + StreamContinuationToken.newBuilder() + .setPartition( + StreamPartition.newBuilder() + .setRowRange( + RowRange.newBuilder() + .setStartKeyClosed(ByteString.copyFromUtf8("start")) + .setEndKeyOpen(ByteString.copyFromUtf8("end")) + .build()) + .build()) + .setToken("random-token") + .build(); + ChangeStreamContinuationToken token = ChangeStreamContinuationToken.fromProto(tokenProto); + ReadChangeStreamQuery query = + ReadChangeStreamQuery.create(TABLE_ID) + .streamPartition("begin", "end") + .continuationTokens(Collections.singletonList(token)) + .endTime(Timestamp.newBuilder().setSeconds(2000).build()) + .heartbeatDuration(5); + ReadChangeStreamRequest request = + ReadChangeStreamRequest.newBuilder() + .setTableName(NameUtil.formatTableName(PROJECT_ID, INSTANCE_ID, TABLE_ID)) + .setAppProfileId(APP_PROFILE_ID) + .setPartition( 
+ StreamPartition.newBuilder() + .setRowRange( + RowRange.newBuilder() + .setStartKeyClosed(ByteString.copyFromUtf8("begin")) + .setEndKeyOpen(ByteString.copyFromUtf8("end")) + .build())) + .setContinuationTokens( + StreamContinuationTokens.newBuilder().addTokens(tokenProto).build()) + .setEndTime(Timestamp.newBuilder().setSeconds(2000).build()) + .setHeartbeatDuration(Duration.newBuilder().setSeconds(5).build()) + .build(); + + ReadChangeStreamQuery clonedReq = query.clone(); + assertThat(clonedReq).isEqualTo(query); + assertThat(clonedReq.toProto(requestContext)).isEqualTo(request); + } +} From 53dd0f0f413e67e3e0243c7b1037c776225849ea Mon Sep 17 00:00:00 2001 From: tengzhonger <109308630+tengzhonger@users.noreply.github.com> Date: Mon, 1 Aug 2022 11:09:57 -0400 Subject: [PATCH 04/13] feat: Add ChangeStreamMutation which is a ChangeStreamRecord (#1324) * Add ChangeStreamMutation which is a ChangeStreamRecord A ChangeStreamMutation holds a list of mods, represented by List, where an Entry is one of DeleteFamily/DeleteCells/SetCell. * fix: Fix styles * fix: Address comments * fix: Update Heartbeat to use AutoValue * fix: Add more comments * fix: Address comments * fix: Fix unit test due to toString(). Can't compare ByteString.toString() directly even though the contents are the same. So we compare their fields and toRowMutation. 
Co-authored-by: Teng Zhong --- .../data/v2/models/ChangeStreamMutation.java | 351 ++++++++++++++++++ .../bigtable/data/v2/models/DeleteCells.java | 47 +++ .../bigtable/data/v2/models/DeleteFamily.java | 34 ++ .../cloud/bigtable/data/v2/models/Entry.java | 26 ++ .../bigtable/data/v2/models/Heartbeat.java | 59 +-- .../bigtable/data/v2/models/SetCell.java | 53 +++ .../v2/models/ChangeStreamMutationTest.java | 330 ++++++++++++++++ .../v2/models/ChangeStreamRecordTest.java | 5 +- .../bigtable/data/v2/models/EntryTest.java | 101 +++++ 9 files changed, 958 insertions(+), 48 deletions(-) create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/DeleteCells.java create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/DeleteFamily.java create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Entry.java create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/SetCell.java create mode 100644 google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutationTest.java create mode 100644 google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/EntryTest.java diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java new file mode 100644 index 0000000000..b79b184e7a --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java @@ -0,0 +1,351 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.models; + +import com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type; +import com.google.cloud.bigtable.data.v2.models.Range.TimestampRange; +import com.google.common.base.MoreObjects; +import com.google.common.base.Objects; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.ByteString; +import com.google.protobuf.Timestamp; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; +import javax.annotation.Nonnull; + +/** + * A ChangeStreamMutation represents a list of mods(represented by List<{@link Entry}>) targeted at + * a single row, which is concatenated by (TODO:ChangeStreamRecordMerger). It represents a logical + * row mutation and can be converted to the original write request(i.e. {@link RowMutation} or + * {@link RowMutationEntry}. + * + *

A ChangeStreamMutation can be constructed in two ways, depending on whether it's a user + * initiated mutation or a Garbage Collection mutation. Either way, the caller should explicitly set + * `token` and `lowWatermark` before build(), otherwise it'll raise an error. + * + *

Case 1) User initiated mutation. + * + *

{@code
+ * ChangeStreamMutation.Builder builder = ChangeStreamMutation.createUserMutation(...);
+ * builder.setCell(...);
+ * builder.deleteFamily(...);
+ * builder.deleteCells(...);
+ * ChangeStreamMutation changeStreamMutation = builder.setToken(...).setLowWatermark(...).build();
+ * }
+ * + * Case 2) Garbage Collection mutation. + * + *
{@code
+ * ChangeStreamMutation.Builder builder = ChangeStreamMutation.createGcMutation(...);
+ * builder.setCell(...);
+ * builder.deleteFamily(...);
+ * builder.deleteCells(...);
+ * ChangeStreamMutation changeStreamMutation = builder.setToken(...).setLowWatermark(...).build();
+ * }
+ */ +public final class ChangeStreamMutation implements ChangeStreamRecord, Serializable { + private static final long serialVersionUID = 8419520253162024218L; + + private final ByteString rowKey; + + /** Possible values: USER/GARBAGE_COLLECTION. */ + private final Type type; + + /** This should only be set when type==USER. */ + private final String sourceClusterId; + + private final Timestamp commitTimestamp; + + private final int tieBreaker; + + private transient ImmutableList.Builder entries = ImmutableList.builder(); + + private String token; + + private Timestamp lowWatermark; + + private ChangeStreamMutation(Builder builder) { + this.rowKey = builder.rowKey; + this.type = builder.type; + this.sourceClusterId = builder.sourceClusterId; + this.commitTimestamp = builder.commitTimestamp; + this.tieBreaker = builder.tieBreaker; + this.token = builder.token; + this.lowWatermark = builder.lowWatermark; + this.entries = builder.entries; + } + + /** + * Creates a new instance of a user initiated mutation. It returns a builder instead of a + * ChangeStreamMutation because `token` and `loWatermark` must be set later when we finish + * building the logical mutation. + */ + static Builder createUserMutation( + @Nonnull ByteString rowKey, + @Nonnull String sourceClusterId, + @Nonnull Timestamp commitTimestamp, + int tieBreaker) { + return new Builder(rowKey, Type.USER, sourceClusterId, commitTimestamp, tieBreaker); + } + + /** + * Creates a new instance of a GC mutation. It returns a builder instead of a ChangeStreamMutation + * because `token` and `loWatermark` must be set later when we finish building the logical + * mutation. 
+ */ + static Builder createGcMutation( + @Nonnull ByteString rowKey, @Nonnull Timestamp commitTimestamp, int tieBreaker) { + return new Builder(rowKey, Type.GARBAGE_COLLECTION, null, commitTimestamp, tieBreaker); + } + + private void readObject(ObjectInputStream input) throws IOException, ClassNotFoundException { + input.defaultReadObject(); + + @SuppressWarnings("unchecked") + ImmutableList deserialized = (ImmutableList) input.readObject(); + this.entries = ImmutableList.builder().addAll(deserialized); + } + + private void writeObject(ObjectOutputStream output) throws IOException { + output.defaultWriteObject(); + output.writeObject(entries.build()); + } + + /** Get the row key of the current mutation. */ + @Nonnull + public ByteString getRowKey() { + return this.rowKey; + } + + /** Get the type of the current mutation. */ + @Nonnull + public Type getType() { + return this.type; + } + + /** Get the source cluster id of the current mutation. Null for Garbage collection mutation. */ + public String getSourceClusterId() { + return this.sourceClusterId; + } + + /** Get the commit timestamp of the current mutation. */ + @Nonnull + public Timestamp getCommitTimestamp() { + return this.commitTimestamp; + } + + /** + * Get the tie breaker of the current mutation. This is used to resolve conflicts when multiple + * mutations are applied to different clusters at the same time. + */ + public int getTieBreaker() { + return this.tieBreaker; + } + + /** Get the token of the current mutation, which can be used to resume the changestream. */ + public String getToken() { + return this.token; + } + + /** Get the low watermark of the current mutation. */ + public Timestamp getLowWatermark() { + return this.lowWatermark; + } + + /** Get the list of mods of the current mutation. */ + @Nonnull + public List getEntries() { + return this.entries.build(); + } + + /** Returns a builder containing all the values of this ChangeStreamMutation class. 
*/ + Builder toBuilder() { + return new Builder(this); + } + + /** Helper class to create a ChangeStreamMutation. */ + public static class Builder { + private final ByteString rowKey; + + private final Type type; + + private final String sourceClusterId; + + private final Timestamp commitTimestamp; + + private final int tieBreaker; + + private transient ImmutableList.Builder entries = ImmutableList.builder(); + + private String token; + + private Timestamp lowWatermark; + + private Builder( + ByteString rowKey, + Type type, + String sourceClusterId, + Timestamp commitTimestamp, + int tieBreaker) { + this.rowKey = rowKey; + this.type = type; + this.sourceClusterId = sourceClusterId; + this.commitTimestamp = commitTimestamp; + this.tieBreaker = tieBreaker; + } + + private Builder(ChangeStreamMutation changeStreamMutation) { + this.rowKey = changeStreamMutation.rowKey; + this.type = changeStreamMutation.type; + this.sourceClusterId = changeStreamMutation.sourceClusterId; + this.commitTimestamp = changeStreamMutation.commitTimestamp; + this.tieBreaker = changeStreamMutation.tieBreaker; + this.entries = changeStreamMutation.entries; + this.token = changeStreamMutation.token; + this.lowWatermark = changeStreamMutation.lowWatermark; + } + + Builder setCell( + @Nonnull String familyName, + @Nonnull ByteString qualifier, + long timestamp, + @Nonnull ByteString value) { + this.entries.add(SetCell.create(familyName, qualifier, timestamp, value)); + return this; + } + + Builder deleteCells( + @Nonnull String familyName, + @Nonnull ByteString qualifier, + @Nonnull TimestampRange timestampRange) { + this.entries.add(DeleteCells.create(familyName, qualifier, timestampRange)); + return this; + } + + Builder deleteFamily(@Nonnull String familyName) { + this.entries.add(DeleteFamily.create(familyName)); + return this; + } + + public Builder setToken(@Nonnull String token) { + this.token = token; + return this; + } + + public Builder setLowWatermark(@Nonnull Timestamp lowWatermark) { 
+ this.lowWatermark = lowWatermark; + return this; + } + + public ChangeStreamMutation build() { + Preconditions.checkArgument( + token != null && lowWatermark != null, + "ChangeStreamMutation must have a continuation token and low watermark."); + return new ChangeStreamMutation(this); + } + } + + public RowMutation toRowMutation(@Nonnull String tableId) { + RowMutation rowMutation = RowMutation.create(tableId, rowKey); + for (Entry entry : this.entries.build()) { + if (entry instanceof DeleteFamily) { + rowMutation.deleteFamily(((DeleteFamily) entry).getFamilyName()); + } else if (entry instanceof DeleteCells) { + DeleteCells deleteCells = (DeleteCells) entry; + rowMutation.deleteCells( + deleteCells.getFamilyName(), + deleteCells.getQualifier(), + deleteCells.getTimestampRange()); + } else if (entry instanceof SetCell) { + SetCell setCell = (SetCell) entry; + rowMutation.setCell( + setCell.getFamilyName(), + setCell.getQualifier(), + setCell.getTimestamp(), + setCell.getValue()); + } else { + throw new IllegalArgumentException("Unexpected Entry type."); + } + } + return rowMutation; + } + + public RowMutationEntry toRowMutationEntry() { + RowMutationEntry rowMutationEntry = RowMutationEntry.create(rowKey); + for (Entry entry : this.entries.build()) { + if (entry instanceof DeleteFamily) { + rowMutationEntry.deleteFamily(((DeleteFamily) entry).getFamilyName()); + } else if (entry instanceof DeleteCells) { + DeleteCells deleteCells = (DeleteCells) entry; + rowMutationEntry.deleteCells( + deleteCells.getFamilyName(), + deleteCells.getQualifier(), + deleteCells.getTimestampRange()); + } else if (entry instanceof SetCell) { + SetCell setCell = (SetCell) entry; + rowMutationEntry.setCell( + setCell.getFamilyName(), + setCell.getQualifier(), + setCell.getTimestamp(), + setCell.getValue()); + } else { + throw new IllegalArgumentException("Unexpected Entry type."); + } + } + return rowMutationEntry; + } + + @Override + public boolean equals(Object o) { + if (this == o) { 
+ return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ChangeStreamMutation otherChangeStreamMutation = (ChangeStreamMutation) o; + return Objects.equal(this.hashCode(), otherChangeStreamMutation.hashCode()); + } + + @Override + public int hashCode() { + return Objects.hashCode( + rowKey, type, sourceClusterId, commitTimestamp, tieBreaker, token, lowWatermark, entries); + } + + @Override + public String toString() { + List entriesAsStrings = new ArrayList<>(); + for (Entry entry : this.entries.build()) { + entriesAsStrings.add(entry.toString()); + } + String entryString = "[" + String.join(";\t", entriesAsStrings) + "]"; + return MoreObjects.toStringHelper(this) + .add("rowKey", this.rowKey.toStringUtf8()) + .add("type", this.type) + .add("sourceClusterId", this.sourceClusterId) + .add("commitTimestamp", this.commitTimestamp.toString()) + .add("token", this.token) + .add("lowWatermark", this.lowWatermark) + .add("entries", entryString) + .toString(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/DeleteCells.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/DeleteCells.java new file mode 100644 index 0000000000..238ddb1638 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/DeleteCells.java @@ -0,0 +1,47 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.models; + +import com.google.auto.value.AutoValue; +import com.google.cloud.bigtable.data.v2.models.Range.TimestampRange; +import com.google.protobuf.ByteString; +import java.io.Serializable; +import javax.annotation.Nonnull; + +/** Representation of a DeleteCells mod in a data change. */ +@AutoValue +public abstract class DeleteCells implements Entry, Serializable { + private static final long serialVersionUID = 851772158721462017L; + + public static DeleteCells create( + @Nonnull String familyName, + @Nonnull ByteString qualifier, + @Nonnull TimestampRange timestampRange) { + return new AutoValue_DeleteCells(familyName, qualifier, timestampRange); + } + + /** Get the column family of the current DeleteCells. */ + @Nonnull + public abstract String getFamilyName(); + + /** Get the column qualifier of the current DeleteCells. */ + @Nonnull + public abstract ByteString getQualifier(); + + /** Get the timestamp range of the current DeleteCells. */ + @Nonnull + public abstract TimestampRange getTimestampRange(); +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/DeleteFamily.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/DeleteFamily.java new file mode 100644 index 0000000000..171ecccb41 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/DeleteFamily.java @@ -0,0 +1,34 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.models; + +import com.google.auto.value.AutoValue; +import java.io.Serializable; +import javax.annotation.Nonnull; + +/** Representation of a DeleteFamily mod in a data change. */ +@AutoValue +public abstract class DeleteFamily implements Entry, Serializable { + private static final long serialVersionUID = 81806775917145615L; + + public static DeleteFamily create(@Nonnull String familyName) { + return new AutoValue_DeleteFamily(familyName); + } + + /** Get the column family of the current DeleteFamily. */ + @Nonnull + public abstract String getFamilyName(); +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Entry.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Entry.java new file mode 100644 index 0000000000..c5c30016f4 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Entry.java @@ -0,0 +1,26 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.models; + +import com.google.api.core.InternalExtensionOnly; + +/** + * Default representation of a mod in a data change, which can be a {@link DeleteFamily}, a {@link + * DeleteCells}, or a {@link SetCell} This class will be used by {@link ChangeStreamMutation} to + * represent a list of mods in a logical change stream mutation. + */ +@InternalExtensionOnly +public interface Entry {} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Heartbeat.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Heartbeat.java index 73876f887b..db82657e49 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Heartbeat.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Heartbeat.java @@ -16,64 +16,31 @@ package com.google.cloud.bigtable.data.v2.models; import com.google.api.core.InternalApi; +import com.google.auto.value.AutoValue; import com.google.bigtable.v2.ReadChangeStreamResponse; -import com.google.common.base.MoreObjects; -import com.google.common.base.Objects; import com.google.protobuf.Timestamp; import java.io.Serializable; import javax.annotation.Nonnull; -public final class Heartbeat implements ChangeStreamRecord, Serializable { +@AutoValue +public abstract class Heartbeat implements ChangeStreamRecord, Serializable { private static final long serialVersionUID = 7316215828353608504L; - private final Timestamp lowWatermark; - private final ChangeStreamContinuationToken changeStreamContinuationToken; - private Heartbeat( - Timestamp lowWatermark, ChangeStreamContinuationToken changeStreamContinuationToken) { - this.lowWatermark = lowWatermark; - this.changeStreamContinuationToken = changeStreamContinuationToken; - } - - @InternalApi("Used in Changestream beam pipeline.") - public ChangeStreamContinuationToken getChangeStreamContinuationToken() { - return 
changeStreamContinuationToken; - } - - @InternalApi("Used in Changestream beam pipeline.") - public Timestamp getLowWatermark() { - return lowWatermark; + public static Heartbeat create( + ChangeStreamContinuationToken changeStreamContinuationToken, Timestamp lowWatermark) { + return new AutoValue_Heartbeat(changeStreamContinuationToken, lowWatermark); } /** Wraps the protobuf {@link ReadChangeStreamResponse.Heartbeat}. */ static Heartbeat fromProto(@Nonnull ReadChangeStreamResponse.Heartbeat heartbeat) { - return new Heartbeat( - heartbeat.getLowWatermark(), - ChangeStreamContinuationToken.fromProto(heartbeat.getContinuationToken())); + return create( + ChangeStreamContinuationToken.fromProto(heartbeat.getContinuationToken()), + heartbeat.getLowWatermark()); } - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - Heartbeat record = (Heartbeat) o; - return Objects.equal(lowWatermark, record.getLowWatermark()) - && Objects.equal(changeStreamContinuationToken, record.getChangeStreamContinuationToken()); - } - - @Override - public int hashCode() { - return Objects.hashCode(lowWatermark, changeStreamContinuationToken); - } + @InternalApi("Used in Changestream beam pipeline.") + public abstract ChangeStreamContinuationToken getChangeStreamContinuationToken(); - @Override - public String toString() { - return MoreObjects.toStringHelper(this) - .add("lowWatermark", lowWatermark) - .add("changeStreamContinuationToken", changeStreamContinuationToken) - .toString(); - } + @InternalApi("Used in Changestream beam pipeline.") + public abstract Timestamp getLowWatermark(); } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/SetCell.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/SetCell.java new file mode 100644 index 0000000000..a157b5cd73 --- /dev/null +++ 
b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/SetCell.java @@ -0,0 +1,53 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.models; + +import com.google.auto.value.AutoValue; +import com.google.protobuf.ByteString; +import java.io.Serializable; +import javax.annotation.Nonnull; + +/** + * Representation of a SetCell mod in a data change, whose value is concatenated by + * (TODO:ChangeStreamRecordMerger) in case of SetCell value chunking. + */ +@AutoValue +public abstract class SetCell implements Entry, Serializable { + private static final long serialVersionUID = 77123872266724154L; + + public static SetCell create( + @Nonnull String familyName, + @Nonnull ByteString qualifier, + long timestamp, + @Nonnull ByteString value) { + return new AutoValue_SetCell(familyName, qualifier, timestamp, value); + } + + /** Get the column family of the current SetCell. */ + @Nonnull + public abstract String getFamilyName(); + + /** Get the column qualifier of the current SetCell. */ + @Nonnull + public abstract ByteString getQualifier(); + + /** Get the timestamp of the current SetCell. */ + public abstract long getTimestamp(); + + /** Get the value of the current SetCell. 
*/ + @Nonnull + public abstract ByteString getValue(); +} diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutationTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutationTest.java new file mode 100644 index 0000000000..938213fb36 --- /dev/null +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutationTest.java @@ -0,0 +1,330 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.models; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.bigtable.v2.MutateRowRequest; +import com.google.bigtable.v2.MutateRowsRequest; +import com.google.bigtable.v2.ReadChangeStreamResponse; +import com.google.cloud.bigtable.data.v2.internal.NameUtil; +import com.google.cloud.bigtable.data.v2.internal.RequestContext; +import com.google.common.primitives.Longs; +import com.google.protobuf.ByteString; +import com.google.protobuf.Timestamp; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import org.junit.Assert; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ChangeStreamMutationTest { + private static final String PROJECT_ID = "fake-project"; + private static final String INSTANCE_ID = "fake-instance"; + private static final String TABLE_ID = "fake-table"; + private static final String APP_PROFILE_ID = "fake-profile"; + private static final RequestContext REQUEST_CONTEXT = + RequestContext.create(PROJECT_ID, INSTANCE_ID, APP_PROFILE_ID); + + @Rule public ExpectedException expect = ExpectedException.none(); + + @Test + public void userInitiatedMutationTest() throws IOException, ClassNotFoundException { + // Create a user initiated logical mutation. 
+ Timestamp fakeCommitTimestamp = Timestamp.newBuilder().setSeconds(1000).build(); + Timestamp fakeLowWatermark = Timestamp.newBuilder().setSeconds(2000).build(); + ChangeStreamMutation changeStreamMutation = + ChangeStreamMutation.createUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0) + .setCell( + "fake-family", + ByteString.copyFromUtf8("fake-qualifier"), + 1000, + ByteString.copyFromUtf8("fake-value")) + .deleteFamily("fake-family") + .deleteCells( + "fake-family", + ByteString.copyFromUtf8("fake-qualifier"), + Range.TimestampRange.create(1000L, 2000L)) + .setToken("fake-token") + .setLowWatermark(fakeLowWatermark) + .build(); + + // Test the getters. + Assert.assertEquals(changeStreamMutation.getRowKey(), ByteString.copyFromUtf8("key")); + Assert.assertEquals( + changeStreamMutation.getType(), ReadChangeStreamResponse.DataChange.Type.USER); + Assert.assertEquals(changeStreamMutation.getSourceClusterId(), "fake-source-cluster-id"); + Assert.assertEquals(changeStreamMutation.getCommitTimestamp(), fakeCommitTimestamp); + Assert.assertEquals(changeStreamMutation.getTieBreaker(), 0); + Assert.assertEquals(changeStreamMutation.getToken(), "fake-token"); + Assert.assertEquals(changeStreamMutation.getLowWatermark(), fakeLowWatermark); + + // Test serialization. 
+ ByteArrayOutputStream bos = new ByteArrayOutputStream(); + ObjectOutputStream oos = new ObjectOutputStream(bos); + oos.writeObject(changeStreamMutation); + oos.close(); + ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray())); + ChangeStreamMutation actual = (ChangeStreamMutation) ois.readObject(); + Assert.assertEquals(actual.getRowKey(), changeStreamMutation.getRowKey()); + Assert.assertEquals(actual.getType(), changeStreamMutation.getType()); + Assert.assertEquals(actual.getSourceClusterId(), changeStreamMutation.getSourceClusterId()); + Assert.assertEquals(actual.getCommitTimestamp(), changeStreamMutation.getCommitTimestamp()); + Assert.assertEquals(actual.getTieBreaker(), changeStreamMutation.getTieBreaker()); + Assert.assertEquals(actual.getToken(), changeStreamMutation.getToken()); + Assert.assertEquals(actual.getLowWatermark(), changeStreamMutation.getLowWatermark()); + assertThat(actual.toRowMutation(TABLE_ID).toProto(REQUEST_CONTEXT)) + .isEqualTo(changeStreamMutation.toRowMutation(TABLE_ID).toProto(REQUEST_CONTEXT)); + } + + @Test + public void gcMutationTest() throws IOException, ClassNotFoundException { + // Create a GC mutation. + Timestamp fakeCommitTimestamp = Timestamp.newBuilder().setSeconds(1000).build(); + Timestamp fakeLowWatermark = Timestamp.newBuilder().setSeconds(2000).build(); + ChangeStreamMutation changeStreamMutation = + ChangeStreamMutation.createGcMutation( + ByteString.copyFromUtf8("key"), fakeCommitTimestamp, 0) + .setCell( + "fake-family", + ByteString.copyFromUtf8("fake-qualifier"), + 1000, + ByteString.copyFromUtf8("fake-value")) + .deleteFamily("fake-family") + .deleteCells( + "fake-family", + ByteString.copyFromUtf8("fake-qualifier"), + Range.TimestampRange.create(1000L, 2000L)) + .setToken("fake-token") + .setLowWatermark(fakeLowWatermark) + .build(); + + // Test the getters. 
+ Assert.assertEquals(changeStreamMutation.getRowKey(), ByteString.copyFromUtf8("key")); + Assert.assertEquals( + changeStreamMutation.getType(), + ReadChangeStreamResponse.DataChange.Type.GARBAGE_COLLECTION); + Assert.assertNull(changeStreamMutation.getSourceClusterId()); + Assert.assertEquals(changeStreamMutation.getCommitTimestamp(), fakeCommitTimestamp); + Assert.assertEquals(changeStreamMutation.getTieBreaker(), 0); + Assert.assertEquals(changeStreamMutation.getToken(), "fake-token"); + Assert.assertEquals(changeStreamMutation.getLowWatermark(), fakeLowWatermark); + + // Test serialization. + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + ObjectOutputStream oos = new ObjectOutputStream(bos); + oos.writeObject(changeStreamMutation); + oos.close(); + ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray())); + ChangeStreamMutation actual = (ChangeStreamMutation) ois.readObject(); + Assert.assertEquals(actual.getRowKey(), changeStreamMutation.getRowKey()); + Assert.assertEquals(actual.getType(), changeStreamMutation.getType()); + Assert.assertEquals(actual.getSourceClusterId(), changeStreamMutation.getSourceClusterId()); + Assert.assertEquals(actual.getCommitTimestamp(), changeStreamMutation.getCommitTimestamp()); + Assert.assertEquals(actual.getTieBreaker(), changeStreamMutation.getTieBreaker()); + Assert.assertEquals(actual.getToken(), changeStreamMutation.getToken()); + Assert.assertEquals(actual.getLowWatermark(), changeStreamMutation.getLowWatermark()); + assertThat(actual.toRowMutation(TABLE_ID).toProto(REQUEST_CONTEXT)) + .isEqualTo(changeStreamMutation.toRowMutation(TABLE_ID).toProto(REQUEST_CONTEXT)); + } + + @Test + public void toRowMutationTest() { + Timestamp fakeCommitTimestamp = Timestamp.newBuilder().setSeconds(1000).build(); + Timestamp fakeLowWatermark = Timestamp.newBuilder().setSeconds(2000).build(); + ChangeStreamMutation changeStreamMutation = + ChangeStreamMutation.createUserMutation( + 
ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0) + .setCell( + "fake-family", + ByteString.copyFromUtf8("fake-qualifier"), + 1000, + ByteString.copyFromUtf8("fake-value")) + .deleteFamily("fake-family") + .deleteCells( + "fake-family", + ByteString.copyFromUtf8("fake-qualifier"), + Range.TimestampRange.create(1000L, 2000L)) + .setToken("fake-token") + .setLowWatermark(fakeLowWatermark) + .build(); + + // Convert it to a rowMutation and construct a MutateRowRequest. + RowMutation rowMutation = changeStreamMutation.toRowMutation(TABLE_ID); + MutateRowRequest mutateRowRequest = rowMutation.toProto(REQUEST_CONTEXT); + String tableName = + NameUtil.formatTableName( + REQUEST_CONTEXT.getProjectId(), REQUEST_CONTEXT.getInstanceId(), TABLE_ID); + assertThat(mutateRowRequest.getTableName()).isEqualTo(tableName); + assertThat(mutateRowRequest.getMutationsList()).hasSize(3); + assertThat(mutateRowRequest.getMutations(0).getSetCell().getValue()) + .isEqualTo(ByteString.copyFromUtf8("fake-value")); + assertThat(mutateRowRequest.getMutations(1).getDeleteFromFamily().getFamilyName()) + .isEqualTo("fake-family"); + assertThat(mutateRowRequest.getMutations(2).getDeleteFromColumn().getFamilyName()) + .isEqualTo("fake-family"); + assertThat(mutateRowRequest.getMutations(2).getDeleteFromColumn().getColumnQualifier()) + .isEqualTo(ByteString.copyFromUtf8("fake-qualifier")); + } + + @Test(expected = IllegalArgumentException.class) + public void toRowMutationWithoutTokenShouldFailTest() { + Timestamp fakeCommitTimestamp = Timestamp.newBuilder().setSeconds(1000).build(); + Timestamp fakeLowWatermark = Timestamp.newBuilder().setSeconds(2000).build(); + ChangeStreamMutation changeStreamMutation = + ChangeStreamMutation.createUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0) + .deleteFamily("fake-family") + .setLowWatermark(fakeLowWatermark) + .build(); + expect.expect(IllegalArgumentException.class); + } + + 
@Test(expected = IllegalArgumentException.class) + public void toRowMutationWithoutLowWatermarkShouldFailTest() { + Timestamp fakeCommitTimestamp = Timestamp.newBuilder().setSeconds(1000).build(); + ChangeStreamMutation changeStreamMutation = + ChangeStreamMutation.createUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0) + .deleteFamily("fake-family") + .setToken("fake-token") + .build(); + expect.expect(IllegalArgumentException.class); + } + + @Test + public void toRowMutationEntryTest() { + Timestamp fakeCommitTimestamp = Timestamp.newBuilder().setSeconds(1000).build(); + Timestamp fakeLowWatermark = Timestamp.newBuilder().setSeconds(2000).build(); + ChangeStreamMutation changeStreamMutation = + ChangeStreamMutation.createUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0) + .setCell( + "fake-family", + ByteString.copyFromUtf8("fake-qualifier"), + 1000, + ByteString.copyFromUtf8("fake-value")) + .deleteFamily("fake-family") + .deleteCells( + "fake-family", + ByteString.copyFromUtf8("fake-qualifier"), + Range.TimestampRange.create(1000L, 2000L)) + .setToken("fake-token") + .setLowWatermark(fakeLowWatermark) + .build(); + + // Convert it to a rowMutationEntry and construct a MutateRowRequest. 
+ RowMutationEntry rowMutationEntry = changeStreamMutation.toRowMutationEntry(); + MutateRowsRequest.Entry mutateRowsRequestEntry = rowMutationEntry.toProto(); + assertThat(mutateRowsRequestEntry.getRowKey()).isEqualTo(ByteString.copyFromUtf8("key")); + assertThat(mutateRowsRequestEntry.getMutationsList()).hasSize(3); + assertThat(mutateRowsRequestEntry.getMutations(0).getSetCell().getValue()) + .isEqualTo(ByteString.copyFromUtf8("fake-value")); + assertThat(mutateRowsRequestEntry.getMutations(1).getDeleteFromFamily().getFamilyName()) + .isEqualTo("fake-family"); + assertThat(mutateRowsRequestEntry.getMutations(2).getDeleteFromColumn().getFamilyName()) + .isEqualTo("fake-family"); + assertThat(mutateRowsRequestEntry.getMutations(2).getDeleteFromColumn().getColumnQualifier()) + .isEqualTo(ByteString.copyFromUtf8("fake-qualifier")); + } + + @Test(expected = IllegalArgumentException.class) + public void toRowMutationEntryWithoutTokenShouldFailTest() { + Timestamp fakeCommitTimestamp = Timestamp.newBuilder().setSeconds(1000).build(); + Timestamp fakeLowWatermark = Timestamp.newBuilder().setSeconds(2000).build(); + ChangeStreamMutation changeStreamMutation = + ChangeStreamMutation.createUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0) + .deleteFamily("fake-family") + .setLowWatermark(fakeLowWatermark) + .build(); + expect.expect(IllegalArgumentException.class); + } + + @Test(expected = IllegalArgumentException.class) + public void toRowMutationEntryWithoutLowWatermarkShouldFailTest() { + Timestamp fakeCommitTimestamp = Timestamp.newBuilder().setSeconds(1000).build(); + ChangeStreamMutation changeStreamMutation = + ChangeStreamMutation.createUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0) + .deleteFamily("fake-family") + .setToken("fake-token") + .build(); + expect.expect(IllegalArgumentException.class); + } + + @Test + public void testWithLongValue() { + Timestamp 
fakeCommitTimestamp = Timestamp.newBuilder().setSeconds(1000).build(); + Timestamp fakeLowWatermark = Timestamp.newBuilder().setSeconds(2000).build(); + ChangeStreamMutation changeStreamMutation = + ChangeStreamMutation.createUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0) + .setCell( + "fake-family", + ByteString.copyFromUtf8("fake-qualifier"), + 1000L, + ByteString.copyFrom(Longs.toByteArray(1L))) + .setToken("fake-token") + .setLowWatermark(fakeLowWatermark) + .build(); + + RowMutation rowMutation = changeStreamMutation.toRowMutation(TABLE_ID); + MutateRowRequest mutateRowRequest = rowMutation.toProto(REQUEST_CONTEXT); + String tableName = + NameUtil.formatTableName( + REQUEST_CONTEXT.getProjectId(), REQUEST_CONTEXT.getInstanceId(), TABLE_ID); + assertThat(mutateRowRequest.getTableName()).isEqualTo(tableName); + assertThat(mutateRowRequest.getMutationsList()).hasSize(1); + assertThat(mutateRowRequest.getMutations(0).getSetCell().getValue()) + .isEqualTo(ByteString.copyFromUtf8("\000\000\000\000\000\000\000\001")); + } + + @Test + public void toBuilderTest() { + // Create a user initiated logical mutation. + Timestamp fakeCommitTimestamp = Timestamp.newBuilder().setSeconds(1000).build(); + Timestamp fakeLowWatermark = Timestamp.newBuilder().setSeconds(2000).build(); + ChangeStreamMutation changeStreamMutation = + ChangeStreamMutation.createUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0) + .setCell( + "fake-family", + ByteString.copyFromUtf8("fake-qualifier"), + 1000, + ByteString.copyFromUtf8("fake-value")) + .deleteFamily("fake-family") + .deleteCells( + "fake-family", + ByteString.copyFromUtf8("fake-qualifier"), + Range.TimestampRange.create(1000L, 2000L)) + .setToken("fake-token") + .setLowWatermark(fakeLowWatermark) + .build(); + + // Test round-trip of a ChangeStreamMutation through `toBuilder().build()`. 
+ ChangeStreamMutation otherMutation = changeStreamMutation.toBuilder().build(); + assertThat(changeStreamMutation).isEqualTo(otherMutation); + } +} diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecordTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecordTest.java index c82aae7330..05df603959 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecordTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecordTest.java @@ -23,6 +23,7 @@ import com.google.bigtable.v2.StreamPartition; import com.google.protobuf.ByteString; import com.google.protobuf.Timestamp; +import com.google.rpc.Status; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -57,7 +58,7 @@ public void heartbeatSerializationTest() throws IOException, ClassNotFoundExcept @Test public void closeStreamSerializationTest() throws IOException, ClassNotFoundException { - com.google.rpc.Status status = com.google.rpc.Status.newBuilder().setCode(0).build(); + Status status = Status.newBuilder().setCode(0).build(); RowRange rowRange1 = RowRange.newBuilder() .setStartKeyClosed(ByteString.copyFromUtf8("")) @@ -124,7 +125,7 @@ public void heartbeatTest() { @Test public void closeStreamTest() { - com.google.rpc.Status status = com.google.rpc.Status.newBuilder().setCode(0).build(); + Status status = Status.newBuilder().setCode(0).build(); RowRange rowRange1 = RowRange.newBuilder() .setStartKeyClosed(ByteString.copyFromUtf8("")) diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/EntryTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/EntryTest.java new file mode 100644 index 0000000000..11ff0a9f02 --- /dev/null +++ 
package com.google.cloud.bigtable.data.v2.models;

import static com.google.common.truth.Truth.assertThat;

import com.google.protobuf.ByteString;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;

/** Unit tests for the {@link Entry} hierarchy: DeleteFamily, DeleteCells and SetCell. */
@RunWith(JUnit4.class)
public class EntryTest {
  /** Serializes {@code obj} with Java serialization and asserts the deserialized copy is equal. */
  private void validateSerializationRoundTrip(Object obj)
      throws IOException, ClassNotFoundException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    ObjectOutputStream oos = new ObjectOutputStream(bos);
    oos.writeObject(obj);
    oos.close();
    ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()));
    assertThat(ois.readObject()).isEqualTo(obj);
  }

  /** All three Entry implementations must survive a Java serialization round trip. */
  @Test
  public void serializationTest() throws IOException, ClassNotFoundException {
    // DeleteFamily
    Entry deleteFamilyEntry = DeleteFamily.create("fake-family");
    validateSerializationRoundTrip(deleteFamilyEntry);

    // DeleteCells
    Entry deleteCellsEntry =
        DeleteCells.create(
            "fake-family",
            ByteString.copyFromUtf8("fake-qualifier"),
            Range.TimestampRange.create(1000L, 2000L));
    validateSerializationRoundTrip(deleteCellsEntry);

    // SetCell
    Entry setCellEntry =
        SetCell.create(
            "fake-family",
            ByteString.copyFromUtf8("fake-qualifier"),
            1000,
            ByteString.copyFromUtf8("fake-value"));
    validateSerializationRoundTrip(setCellEntry);
  }

  @Test
  public void deleteFamilyTest() {
    Entry deleteFamilyEntry = DeleteFamily.create("fake-family");
    DeleteFamily deleteFamily = (DeleteFamily) deleteFamilyEntry;
    Assert.assertEquals("fake-family", deleteFamily.getFamilyName());
  }

  @Test
  public void deleteCellsTest() {
    Entry deleteCellEntry =
        DeleteCells.create(
            "fake-family",
            ByteString.copyFromUtf8("fake-qualifier"),
            Range.TimestampRange.create(1000L, 2000L));
    DeleteCells deleteCells = (DeleteCells) deleteCellEntry;
    Assert.assertEquals("fake-family", deleteCells.getFamilyName());
    Assert.assertEquals(ByteString.copyFromUtf8("fake-qualifier"), deleteCells.getQualifier());
    Assert.assertEquals(Range.TimestampRange.create(1000L, 2000L), deleteCells.getTimestampRange());
  }

  // Renamed from the original typo "setSellTest".
  @Test
  public void setCellTest() {
    Entry setCellEntry =
        SetCell.create(
            "fake-family",
            ByteString.copyFromUtf8("fake-qualifier"),
            1000,
            ByteString.copyFromUtf8("fake-value"));
    SetCell setCell = (SetCell) setCellEntry;
    Assert.assertEquals("fake-family", setCell.getFamilyName());
    Assert.assertEquals(ByteString.copyFromUtf8("fake-qualifier"), setCell.getQualifier());
    Assert.assertEquals(1000, setCell.getTimestamp());
    Assert.assertEquals(ByteString.copyFromUtf8("fake-value"), setCell.getValue());
  }
}
ChangeStreamMergingCallable. * fix: Fix styles and add some tests. * fix: Address comments * fix: Update comments Co-authored-by: Teng Zhong --- .../data/v2/models/ChangeStreamMutation.java | 20 +- .../v2/models/ChangeStreamRecordAdapter.java | 173 ++++++ .../DefaultChangeStreamRecordAdapter.java | 175 ++++++ .../bigtable/data/v2/models/Heartbeat.java | 2 +- .../ChangeStreamStateMachine.java | 582 ++++++++++++++++++ .../v2/models/ChangeStreamMutationTest.java | 20 +- .../DefaultChangeStreamRecordAdapterTest.java | 446 ++++++++++++++ .../ChangeStreamStateMachineTest.java | 61 ++ 8 files changed, 1457 insertions(+), 22 deletions(-) create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecordAdapter.java create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/DefaultChangeStreamRecordAdapter.java create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamStateMachine.java create mode 100644 google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/DefaultChangeStreamRecordAdapterTest.java create mode 100644 google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamStateMachineTest.java diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java index b79b184e7a..10571ecd1f 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java @@ -321,14 +321,28 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) { return false; } - ChangeStreamMutation otherChangeStreamMutation = (ChangeStreamMutation) o; - return 
Objects.equal(this.hashCode(), otherChangeStreamMutation.hashCode()); + ChangeStreamMutation other = (ChangeStreamMutation) o; + return Objects.equal(this.rowKey, other.rowKey) + && Objects.equal(this.type, other.type) + && Objects.equal(this.sourceClusterId, other.sourceClusterId) + && Objects.equal(this.commitTimestamp, other.commitTimestamp) + && Objects.equal(this.tieBreaker, other.tieBreaker) + && Objects.equal(this.token, other.token) + && Objects.equal(this.lowWatermark, other.lowWatermark) + && Objects.equal(this.entries.build(), other.entries.build()); } @Override public int hashCode() { return Objects.hashCode( - rowKey, type, sourceClusterId, commitTimestamp, tieBreaker, token, lowWatermark, entries); + rowKey, + type, + sourceClusterId, + commitTimestamp, + tieBreaker, + token, + lowWatermark, + entries.build()); } @Override diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecordAdapter.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecordAdapter.java new file mode 100644 index 0000000000..6e9715a407 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecordAdapter.java @@ -0,0 +1,173 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package com.google.cloud.bigtable.data.v2.models;

import com.google.api.core.InternalApi;
import com.google.bigtable.v2.ReadChangeStreamResponse;
import com.google.cloud.bigtable.data.v2.models.Range.TimestampRange;
import com.google.protobuf.ByteString;
import com.google.protobuf.Timestamp;
import javax.annotation.Nonnull;

/**
 * An extension point that allows end users to plug in a custom implementation of logical change
 * stream records. This is useful in cases where the user would like to apply advanced client side
 * filtering (for example, only keep DeleteFamily in the mutations). This adapter acts like a
 * factory for a SAX style change stream record builder.
 *
 * <p>Note: the generic type parameter {@code <ChangeStreamRecordT>} was restored here — it was
 * stripped from the mangled source, which made the interface uncompilable as shown.
 *
 * @param <ChangeStreamRecordT> the user-defined change stream record type
 */
public interface ChangeStreamRecordAdapter<ChangeStreamRecordT> {
  /** Creates a new instance of a {@link ChangeStreamRecordBuilder}. */
  ChangeStreamRecordBuilder<ChangeStreamRecordT> createChangeStreamRecordBuilder();

  /** Checks if the given change stream record is a Heartbeat. */
  @InternalApi("Used in Changestream beam pipeline.")
  boolean isHeartbeat(ChangeStreamRecordT record);

  /**
   * Get the token from the given Heartbeat record. If the given record is not a Heartbeat, it will
   * throw an Exception.
   */
  @InternalApi("Used in Changestream beam pipeline.")
  String getTokenFromHeartbeat(ChangeStreamRecordT heartbeatRecord);

  /** Checks if the given change stream record is a ChangeStreamMutation. */
  @InternalApi("Used in Changestream beam pipeline.")
  boolean isChangeStreamMutation(ChangeStreamRecordT record);

  /**
   * Get the token from the given ChangeStreamMutation record. If the given record is not a
   * ChangeStreamMutation, it will throw an Exception.
   */
  @InternalApi("Used in Changestream beam pipeline.")
  String getTokenFromChangeStreamMutation(ChangeStreamRecordT record);

  /**
   * A SAX style change stream record factory. It is responsible for creating one of the three
   * types of change stream record: heartbeat, close stream, and a change stream mutation.
   *
   * <p>State management is handled external to the implementation of this class:
   *
   * <ul>
   *   <li>Case 1: Heartbeat — exactly 1 {@code onHeartbeat}.
   *   <li>Case 2: CloseStream — exactly 1 {@code onCloseStream}.
   *   <li>Case 3: ChangeStreamMutation — one or more mods, where SetCells might be chunked:
   *       DeleteFamily (exactly 1 {@code deleteFamily}), DeleteCells (exactly 1 {@code
   *       deleteCells}), SetCell (exactly 1 {@code startCell}, at least 1 {@code cellValue},
   *       exactly 1 {@code finishCell}).
   * </ul>
   *
   * <p>The whole flow of constructing a ChangeStreamMutation is: exactly 1 {@code
   * startUserMutation} or {@code startGcMutation}; at least 1 DeleteFamily/DeleteCells/SetCell
   * mod; exactly 1 {@code finishChangeStreamMutation}.
   *
   * <p>Note: For a non-chunked SetCell, only 1 {@code cellValue} will be called. For a chunked
   * SetCell, more than 1 {@code cellValue}s will be called.
   *
   * <p>Note: DeleteRow's won't appear in data changes since they'll be converted to multiple
   * DeleteFamily's.
   */
  interface ChangeStreamRecordBuilder<ChangeStreamRecordT> {
    /**
     * Called to create a heartbeat. This will be called at most once. If called, the current
     * change stream record must not include any data changes or close stream messages.
     */
    ChangeStreamRecordT onHeartbeat(ReadChangeStreamResponse.Heartbeat heartbeat);

    /**
     * Called to create a close stream message. This will be called at most once. If called, the
     * current change stream record must not include any data changes or heartbeats.
     */
    ChangeStreamRecordT onCloseStream(ReadChangeStreamResponse.CloseStream closeStream);

    /**
     * Called to start a new user initiated ChangeStreamMutation. This will be called at most
     * once. If called, the current change stream record must not include any close stream message
     * or heartbeat.
     */
    void startUserMutation(
        @Nonnull ByteString rowKey,
        @Nonnull String sourceClusterId,
        @Nonnull Timestamp commitTimestamp,
        int tieBreaker);

    /**
     * Called to start a new Garbage Collection ChangeStreamMutation. This will be called at most
     * once. If called, the current change stream record must not include any close stream message
     * or heartbeat.
     */
    void startGcMutation(
        @Nonnull ByteString rowKey, @Nonnull Timestamp commitTimestamp, int tieBreaker);

    /** Called to add a DeleteFamily mod. */
    void deleteFamily(@Nonnull String familyName);

    /** Called to add a DeleteCells mod. */
    void deleteCells(
        @Nonnull String familyName,
        @Nonnull ByteString qualifier,
        @Nonnull TimestampRange timestampRange);

    /**
     * Called to start a SetCell.
     *
     * <p>Non-chunked cell: exactly 1 {@code startCell}, exactly 1 {@code cellValue}, exactly 1
     * {@code finishCell}. Chunked cell: exactly 1 {@code startCell}, at least 2 {@code
     * cellValue}, exactly 1 {@code finishCell}.
     */
    void startCell(String family, ByteString qualifier, long timestampMicros);

    /**
     * Called once per non-chunked cell, or at least twice per chunked cell to concatenate the
     * cell value.
     */
    void cellValue(ByteString value);

    /** Called once per cell to signal the end of the value (unless reset). */
    void finishCell();

    /** Called once per stream record to signal that all mods have been processed (unless reset). */
    ChangeStreamRecordT finishChangeStreamMutation(
        @Nonnull String token, @Nonnull Timestamp lowWatermark);

    /** Called when the current in progress change stream record should be dropped. */
    void reset();
  }
}
package com.google.cloud.bigtable.data.v2.models;

import com.google.bigtable.v2.ReadChangeStreamResponse;
import com.google.cloud.bigtable.data.v2.models.Range.TimestampRange;
import com.google.common.base.Preconditions;
import com.google.protobuf.ByteString;
import com.google.protobuf.Timestamp;
import javax.annotation.Nonnull;

/**
 * Default implementation of a {@link ChangeStreamRecordAdapter} that uses {@link
 * ChangeStreamRecord}s to represent change stream records.
 *
 * <p>Note: the generic arguments ({@code <ChangeStreamRecord>}) were restored here — they were
 * stripped from the mangled source, which made the class uncompilable as shown.
 */
public class DefaultChangeStreamRecordAdapter
    implements ChangeStreamRecordAdapter<ChangeStreamRecord> {

  /** {@inheritDoc} */
  @Override
  public ChangeStreamRecordBuilder<ChangeStreamRecord> createChangeStreamRecordBuilder() {
    return new DefaultChangeStreamRecordBuilder();
  }

  /** {@inheritDoc} */
  @Override
  public boolean isHeartbeat(ChangeStreamRecord record) {
    return record instanceof Heartbeat;
  }

  /** {@inheritDoc} */
  @Override
  public String getTokenFromHeartbeat(ChangeStreamRecord record) {
    Preconditions.checkArgument(isHeartbeat(record), "record is not a Heartbeat.");
    return ((Heartbeat) record).getChangeStreamContinuationToken().getToken();
  }

  /** {@inheritDoc} */
  @Override
  public boolean isChangeStreamMutation(ChangeStreamRecord record) {
    return record instanceof ChangeStreamMutation;
  }

  /** {@inheritDoc} */
  @Override
  public String getTokenFromChangeStreamMutation(ChangeStreamRecord record) {
    Preconditions.checkArgument(
        isChangeStreamMutation(record), "record is not a ChangeStreamMutation.");
    return ((ChangeStreamMutation) record).getToken();
  }

  /**
   * {@link ChangeStreamRecordBuilder} that assembles {@link ChangeStreamRecord}s. Heartbeats and
   * CloseStreams are emitted directly; data changes are accumulated into a {@link
   * ChangeStreamMutation.Builder} until the mutation is finished. (Was mislabeled
   * {@code {@inheritDoc}} — classes do not inherit documentation.)
   */
  static class DefaultChangeStreamRecordBuilder
      implements ChangeStreamRecordBuilder<ChangeStreamRecord> {
    // Null between records; non-null while a ChangeStreamMutation is being assembled.
    private ChangeStreamMutation.Builder changeStreamMutationBuilder = null;

    // State of the SetCell currently being assembled (possibly across chunks).
    private String family;
    private ByteString qualifier;
    private long timestampMicros;
    private ByteString value;

    public DefaultChangeStreamRecordBuilder() {
      reset();
    }

    /** {@inheritDoc} */
    @Override
    public ChangeStreamRecord onHeartbeat(ReadChangeStreamResponse.Heartbeat heartbeat) {
      Preconditions.checkArgument(
          this.changeStreamMutationBuilder == null,
          "Can not create a Heartbeat when there is an existing ChangeStreamMutation being built.");
      return Heartbeat.fromProto(heartbeat);
    }

    /** {@inheritDoc} */
    @Override
    public ChangeStreamRecord onCloseStream(ReadChangeStreamResponse.CloseStream closeStream) {
      Preconditions.checkArgument(
          this.changeStreamMutationBuilder == null,
          "Can not create a CloseStream when there is an existing ChangeStreamMutation being built.");
      return CloseStream.fromProto(closeStream);
    }

    /** {@inheritDoc} */
    @Override
    public void startUserMutation(
        @Nonnull ByteString rowKey,
        @Nonnull String sourceClusterId,
        @Nonnull Timestamp commitTimestamp,
        int tieBreaker) {
      this.changeStreamMutationBuilder =
          ChangeStreamMutation.createUserMutation(
              rowKey, sourceClusterId, commitTimestamp, tieBreaker);
    }

    /** {@inheritDoc} */
    @Override
    public void startGcMutation(
        @Nonnull ByteString rowKey, @Nonnull Timestamp commitTimestamp, int tieBreaker) {
      this.changeStreamMutationBuilder =
          ChangeStreamMutation.createGcMutation(rowKey, commitTimestamp, tieBreaker);
    }

    /** {@inheritDoc} */
    @Override
    public void deleteFamily(@Nonnull String familyName) {
      this.changeStreamMutationBuilder.deleteFamily(familyName);
    }

    /** {@inheritDoc} */
    @Override
    public void deleteCells(
        @Nonnull String familyName,
        @Nonnull ByteString qualifier,
        @Nonnull TimestampRange timestampRange) {
      this.changeStreamMutationBuilder.deleteCells(familyName, qualifier, timestampRange);
    }

    /** {@inheritDoc} */
    @Override
    public void startCell(String family, ByteString qualifier, long timestampMicros) {
      this.family = family;
      this.qualifier = qualifier;
      this.timestampMicros = timestampMicros;
      // Start empty so chunked values can be concatenated as they arrive.
      this.value = ByteString.EMPTY;
    }

    /** {@inheritDoc} */
    @Override
    public void cellValue(ByteString value) {
      this.value = this.value.concat(value);
    }

    /** {@inheritDoc} */
    @Override
    public void finishCell() {
      this.changeStreamMutationBuilder.setCell(
          this.family, this.qualifier, this.timestampMicros, this.value);
    }

    /** {@inheritDoc} */
    @Override
    public ChangeStreamRecord finishChangeStreamMutation(
        @Nonnull String token, @Nonnull Timestamp lowWatermark) {
      this.changeStreamMutationBuilder.setToken(token);
      this.changeStreamMutationBuilder.setLowWatermark(lowWatermark);
      return this.changeStreamMutationBuilder.build();
    }

    /** {@inheritDoc} */
    @Override
    public void reset() {
      changeStreamMutationBuilder = null;

      family = null;
      qualifier = null;
      timestampMicros = 0;
      value = null;
    }
  }
}
b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamStateMachine.java new file mode 100644 index 0000000000..7ab7fa2b7b --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamStateMachine.java @@ -0,0 +1,582 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.changestream; + +import com.google.bigtable.v2.Mutation; +import com.google.bigtable.v2.ReadChangeStreamResponse; +import com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type; +import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecordAdapter.ChangeStreamRecordBuilder; +import com.google.cloud.bigtable.data.v2.models.Range.TimestampRange; +import com.google.common.base.Preconditions; + +/** + * A state machine to produce change stream records from a stream of {@link + * ReadChangeStreamResponse}. A change stream record can be a Heartbeat, a CloseStream or a + * ChangeStreamMutation. + * + *

There could be two types of chunking for a ChangeStreamMutation: + * + *

    + *
  • Non-SetCell chunking. For example, a ChangeStreamMutation has two mods, DeleteFamily and + * DeleteColumn. DeleteFamily is sent in the first {@link ReadChangeStreamResponse} and + * DeleteColumn is sent in the second {@link ReadChangeStreamResponse}. + *
  • {@link ReadChangeStreamResponse.MutationChunk} has a chunked {@link + * com.google.bigtable.v2.Mutation.SetCell} mutation. For example, a logical mutation has one + * big {@link Mutation.SetCell} mutation which is chunked into two {@link + * ReadChangeStreamResponse}s. The first {@link ReadChangeStreamResponse.DataChange} has the + * first half of the cell value, and the second {@link ReadChangeStreamResponse.DataChange} + * has the second half. + *
+ * + * This state machine handles both types of chunking. + * + *

Building of the actual change stream record object is delegated to a {@link + * ChangeStreamRecordBuilder}. This class is not thread safe. + * + *

The inputs are: + * + *

    + *
  • {@link ReadChangeStreamResponse.Heartbeat}s. + *
  • {@link ReadChangeStreamResponse.CloseStream}s. + *
  • {@link ReadChangeStreamResponse.DataChange}s, that must be merged to a + * ChangeStreamMutation. + *
  • ChangeStreamRecord consumption events that reset the state machine for the next change + * stream record. + *
+ * + *

The outputs are: + * + *

    + *
  • Heartbeat records. + *
  • CloseStream records. + *
  • ChangeStreamMutation records. + *
+ * + *

Expected Usage: + * + *

{@code
+ * ChangeStreamStateMachine changeStreamStateMachine = new ChangeStreamStateMachine<>(myChangeStreamRecordAdapter);
+ * while(responseIterator.hasNext()) {
+ *   ReadChangeStreamResponse response = responseIterator.next();
+ *   if (response.hasHeartbeat()) {
+ *     changeStreamStateMachine.handleHeartbeat(response.getHeartbeat());
+ *   } else if (response.hasCloseStream()) {
+ *     changeStreamStateMachine.handleCloseStream(response.getCloseStream());
+ *   } else {
+ *       changeStreamStateMachine.handleDataChange(response.getDataChange());
+ *   }
+ *   if (changeStreamStateMachine.hasCompleteChangeStreamRecord()) {
+ *       MyChangeStreamRecord = changeStreamStateMachine.consumeChangeStreamRecord();
+ *       // do something with the change stream record.
+ *   }
+ * }
+ * }
+ * + *

Package-private for internal use. + * + * @param The type of row the adapter will build + */ +final class ChangeStreamStateMachine { + private final ChangeStreamRecordBuilder builder; + private State currentState; + // debug stats + private int numHeartbeats = 0; + private int numCloseStreams = 0; + private int numDataChanges = 0; + private int numNonCellMods = 0; + private int numCellChunks = 0; // 1 for non-chunked cell. + private int actualTotalSizeOfChunkedSetCell = 0; + private ChangeStreamRecordT completeChangeStreamRecord; + + /** + * Initialize a new state machine that's ready for a new change stream record. + * + * @param builder The builder that will build the final change stream record. + */ + ChangeStreamStateMachine(ChangeStreamRecordBuilder builder) { + this.builder = builder; + reset(); + } + + /** + * Handle heartbeat events from the server. + * + *

+ *
Valid states: + *
{@link ChangeStreamStateMachine#AWAITING_NEW_STREAM_RECORD} + *
Resulting states: + *
{@link ChangeStreamStateMachine#AWAITING_STREAM_RECORD_CONSUME} + *
+ */ + void handleHeartbeat(ReadChangeStreamResponse.Heartbeat heartbeat) { + try { + numHeartbeats++; + currentState = currentState.handleHeartbeat(heartbeat); + } catch (RuntimeException e) { + currentState = null; + throw e; + } + } + + /** + * Handle CloseStream events from the server. + * + *
+ *
Valid states: + *
{@link ChangeStreamStateMachine#AWAITING_NEW_STREAM_RECORD} + *
Resulting states: + *
{@link ChangeStreamStateMachine#AWAITING_STREAM_RECORD_CONSUME} + *
+ */ + void handleCloseStream(ReadChangeStreamResponse.CloseStream closeStream) { + try { + numCloseStreams++; + currentState = currentState.handleCloseStream(closeStream); + } catch (RuntimeException e) { + currentState = null; + throw e; + } + } + + /** + * Feeds a new dataChange into the state machine. If the dataChange is invalid, the state machine + * will throw an exception and should not be used for further input. + * + *
+ *
Valid states: + *
{@link ChangeStreamStateMachine#AWAITING_NEW_STREAM_RECORD} + *
{@link ChangeStreamStateMachine#AWAITING_NEW_MOD} + *
{@link ChangeStreamStateMachine#AWAITING_CELL_VALUE} + *
Resulting states: + *
{@link ChangeStreamStateMachine#AWAITING_NEW_MOD} + *
{@link ChangeStreamStateMachine#AWAITING_CELL_VALUE} + *
{@link ChangeStreamStateMachine#AWAITING_STREAM_RECORD_CONSUME} + *
+ * + * @param dataChange The new chunk to process. + * @throws ChangeStreamStateMachine.InvalidInputException When the chunk is not applicable to the + * current state. + * @throws IllegalStateException When the internal state is inconsistent + */ + void handleDataChange(ReadChangeStreamResponse.DataChange dataChange) { + try { + numDataChanges++; + currentState = currentState.handleMod(dataChange, 0); + } catch (RuntimeException e) { + currentState = null; + throw e; + } + } + + /** + * Returns the completed change stream record and transitions to {@link + * ChangeStreamStateMachine#AWAITING_NEW_STREAM_RECORD}. + * + * @return The completed change stream record. + * @throws IllegalStateException If the last dataChange did not complete a change stream record. + */ + ChangeStreamRecordT consumeChangeStreamRecord() { + Preconditions.checkState( + completeChangeStreamRecord != null, "No change stream record to consume."); + Preconditions.checkState( + currentState == AWAITING_STREAM_RECORD_CONSUME, + "Change stream record is not ready to consume: " + currentState); + ChangeStreamRecordT changeStreamRecord = completeChangeStreamRecord; + reset(); + return changeStreamRecord; + } + + /** Checks if there is a complete change stream record to be consumed. */ + boolean hasCompleteChangeStreamRecord() { + return completeChangeStreamRecord != null && currentState == AWAITING_STREAM_RECORD_CONSUME; + } + /** + * Checks if the state machine is in the middle of processing a change stream record. + * + * @return True If there is a change stream record in progress. 
+ */ + boolean isChangeStreamRecordInProgress() { + return currentState != AWAITING_NEW_STREAM_RECORD; + } + + private void reset() { + currentState = AWAITING_NEW_STREAM_RECORD; + numHeartbeats = 0; + numCloseStreams = 0; + numDataChanges = 0; + numNonCellMods = 0; + numCellChunks = 0; + actualTotalSizeOfChunkedSetCell = 0; + completeChangeStreamRecord = null; + + builder.reset(); + } + + /** + * Base class for all the state machine's internal states. + * + *

Each state can consume 3 events: Heartbeat, CloseStream and a Mod. By default, the default + * implementation will just throw an IllegalStateException unless the subclass adds explicit + * handling for these events. + */ + abstract static class State { + /** + * Accepts a Heartbeat by the server. And completes the current change stream record. + * + * @throws IllegalStateException If the subclass can't handle heartbeat events. + */ + ChangeStreamStateMachine.State handleHeartbeat(ReadChangeStreamResponse.Heartbeat heartbeat) { + throw new IllegalStateException(); + } + + /** + * Accepts a CloseStream by the server. And completes the current change stream record. + * + * @throws IllegalStateException If the subclass can't handle CloseStream events. + */ + ChangeStreamStateMachine.State handleCloseStream( + ReadChangeStreamResponse.CloseStream closeStream) { + throw new IllegalStateException(); + } + + /** + * Accepts a new mod and transitions to the next state. A mod could be a DeleteFamily, a + * DeleteColumn, or a SetCell. + * + * @param dataChange The DataChange that holds the new mod to process. + * @param index The index of the mod in the DataChange. + * @return The next state. + * @throws IllegalStateException If the subclass can't handle the mod. + * @throws ChangeStreamStateMachine.InvalidInputException If the subclass determines that this + * dataChange is invalid. + */ + ChangeStreamStateMachine.State handleMod( + ReadChangeStreamResponse.DataChange dataChange, int index) { + throw new IllegalStateException(); + } + } + + /** + * The default state when the state machine is awaiting a ReadChangeStream response to start a new + * change stream record. It will notify the builder of the new change stream record and transits + * to one of the following states: + * + *

+ *
{@link ChangeStreamStateMachine#AWAITING_STREAM_RECORD_CONSUME}, in case of a Heartbeat + * or a CloseStream. + *
Same as {@link ChangeStreamStateMachine#AWAITING_NEW_MOD}, depending on the DataChange. + *
+ */ + private final State AWAITING_NEW_STREAM_RECORD = + new State() { + @Override + State handleHeartbeat(ReadChangeStreamResponse.Heartbeat heartbeat) { + validate( + completeChangeStreamRecord == null, + "AWAITING_NEW_STREAM_RECORD: Existing ChangeStreamRecord not consumed yet."); + completeChangeStreamRecord = builder.onHeartbeat(heartbeat); + return AWAITING_STREAM_RECORD_CONSUME; + } + + @Override + State handleCloseStream(ReadChangeStreamResponse.CloseStream closeStream) { + validate( + completeChangeStreamRecord == null, + "AWAITING_NEW_STREAM_RECORD: Existing ChangeStreamRecord not consumed yet."); + completeChangeStreamRecord = builder.onCloseStream(closeStream); + return AWAITING_STREAM_RECORD_CONSUME; + } + + @Override + State handleMod(ReadChangeStreamResponse.DataChange dataChange, int index) { + validate( + completeChangeStreamRecord == null, + "AWAITING_NEW_STREAM_RECORD: Existing ChangeStreamRecord not consumed yet."); + validate( + !dataChange.getRowKey().isEmpty(), + "AWAITING_NEW_STREAM_RECORD: First data change missing rowKey."); + validate( + dataChange.hasCommitTimestamp(), + "AWAITING_NEW_STREAM_RECORD: First data change missing commit timestamp."); + validate( + index == 0, + "AWAITING_NEW_STREAM_RECORD: First data change should start with the first mod."); + validate( + dataChange.getChunksCount() > 0, + "AWAITING_NEW_STREAM_RECORD: First data change missing mods."); + if (dataChange.getType() == Type.GARBAGE_COLLECTION) { + validate( + dataChange.getSourceClusterId().isEmpty(), + "AWAITING_NEW_STREAM_RECORD: GC mutation shouldn't have source cluster id."); + builder.startGcMutation( + dataChange.getRowKey(), + dataChange.getCommitTimestamp(), + dataChange.getTiebreaker()); + } else if (dataChange.getType() == Type.USER) { + validate( + !dataChange.getSourceClusterId().isEmpty(), + "AWAITING_NEW_STREAM_RECORD: User initiated data change missing source cluster id."); + builder.startUserMutation( + dataChange.getRowKey(), + 
dataChange.getSourceClusterId(), + dataChange.getCommitTimestamp(), + dataChange.getTiebreaker()); + } else { + validate(false, "AWAITING_NEW_STREAM_RECORD: Unexpected type: " + dataChange.getType()); + } + return AWAITING_NEW_MOD.handleMod(dataChange, index); + } + }; + + /** + * A state to handle the next Mod. + * + *
+ *
Valid exit states: + *
{@link ChangeStreamStateMachine#AWAITING_NEW_MOD}. Current mod is added, and we have more + * mods to expect. + *
{@link ChangeStreamStateMachine#AWAITING_CELL_VALUE}. Current mod is the first chunk of a + * chunked SetCell. + *
{@link ChangeStreamStateMachine#AWAITING_STREAM_RECORD_CONSUME}. Current mod is the last + * mod of the current logical mutation. + *
+ */ + private final State AWAITING_NEW_MOD = + new State() { + @Override + State handleHeartbeat(ReadChangeStreamResponse.Heartbeat heartbeat) { + throw new IllegalStateException( + "AWAITING_NEW_MOD: Can't handle a Heartbeat in the middle of building a ChangeStreamMutation."); + } + + @Override + State handleCloseStream(ReadChangeStreamResponse.CloseStream closeStream) { + throw new IllegalStateException( + "AWAITING_NEW_MOD: Can't handle a CloseStream in the middle of building a ChangeStreamMutation."); + } + + @Override + State handleMod(ReadChangeStreamResponse.DataChange dataChange, int index) { + validate( + 0 <= index && index <= dataChange.getChunksCount() - 1, + "AWAITING_NEW_MOD: Index out of bound."); + ReadChangeStreamResponse.MutationChunk chunk = dataChange.getChunks(index); + Mutation mod = chunk.getMutation(); + // Case 1: SetCell + if (mod.hasSetCell()) { + // Start the Cell and delegate to AWAITING_CELL_VALUE to add the cell value. + Mutation.SetCell setCell = chunk.getMutation().getSetCell(); + if (chunk.hasChunkInfo()) { + // If it has chunk info, it must be the first chunk of a chunked SetCell. 
+ validate( + chunk.getChunkInfo().getChunkedValueOffset() == 0, + "First chunk of a chunked cell must start with offset==0."); + actualTotalSizeOfChunkedSetCell = 0; + } + builder.startCell( + setCell.getFamilyName(), + setCell.getColumnQualifier(), + setCell.getTimestampMicros()); + return AWAITING_CELL_VALUE.handleMod(dataChange, index); + } + // Case 2: DeleteFamily + if (mod.hasDeleteFromFamily()) { + numNonCellMods++; + builder.deleteFamily(mod.getDeleteFromFamily().getFamilyName()); + return checkAndFinishMutationIfNeeded(dataChange, index + 1); + } + // Case 3: DeleteCell + if (mod.hasDeleteFromColumn()) { + numNonCellMods++; + builder.deleteCells( + mod.getDeleteFromColumn().getFamilyName(), + mod.getDeleteFromColumn().getColumnQualifier(), + TimestampRange.create( + mod.getDeleteFromColumn().getTimeRange().getStartTimestampMicros(), + mod.getDeleteFromColumn().getTimeRange().getEndTimestampMicros())); + return checkAndFinishMutationIfNeeded(dataChange, index + 1); + } + throw new IllegalStateException("AWAITING_NEW_MOD: Unexpected mod type"); + } + }; + + /** + * A state that represents a cell's value continuation. + * + *
+ *
Valid exit states: + *
{@link ChangeStreamStateMachine#AWAITING_NEW_MOD}. Current chunked SetCell is added, and + * we have more mods to expect. + *
{@link ChangeStreamStateMachine#AWAITING_CELL_VALUE}. Current chunked SetCell has more + * cell values to expect. + *
{@link ChangeStreamStateMachine#AWAITING_STREAM_RECORD_CONSUME}. Current chunked SetCell + * is the last mod of the current logical mutation. + *
+ */ + private final State AWAITING_CELL_VALUE = + new State() { + @Override + State handleHeartbeat(ReadChangeStreamResponse.Heartbeat heartbeat) { + throw new IllegalStateException( + "AWAITING_CELL_VALUE: Can't handle a Heartbeat in the middle of building a SetCell."); + } + + @Override + State handleCloseStream(ReadChangeStreamResponse.CloseStream closeStream) { + throw new IllegalStateException( + "AWAITING_CELL_VALUE: Can't handle a CloseStream in the middle of building a SetCell."); + } + + @Override + State handleMod(ReadChangeStreamResponse.DataChange dataChange, int index) { + validate( + 0 <= index && index <= dataChange.getChunksCount() - 1, + "AWAITING_CELL_VALUE: Index out of bound."); + ReadChangeStreamResponse.MutationChunk chunk = dataChange.getChunks(index); + validate( + chunk.getMutation().hasSetCell(), + "AWAITING_CELL_VALUE: Current mod is not a SetCell."); + Mutation.SetCell setCell = chunk.getMutation().getSetCell(); + numCellChunks++; + builder.cellValue(setCell.getValue()); + // Case 1: Current SetCell is chunked. For example: [ReadChangeStreamResponse1: + // {DeleteColumn, DeleteFamily, SetCell_1}, ReadChangeStreamResponse2: {SetCell_2, + // DeleteFamily}]. + if (chunk.hasChunkInfo()) { + validate( + chunk.getChunkInfo().getChunkedValueSize() > 0, + "AWAITING_CELL_VALUE: Chunked value size must be positive."); + actualTotalSizeOfChunkedSetCell += setCell.getValue().size(); + // If it's the last chunk of the chunked SetCell, finish the cell. + if (chunk.getChunkInfo().getLastChunk()) { + builder.finishCell(); + validate( + actualTotalSizeOfChunkedSetCell == chunk.getChunkInfo().getChunkedValueSize(), + "Chunked value size in ChunkInfo doesn't match the actual total size. 
" + + "ChunkInfo: " + + chunk.getChunkInfo().getChunkedValueSize() + + "; actual total size: " + + actualTotalSizeOfChunkedSetCell); + return checkAndFinishMutationIfNeeded(dataChange, index + 1); + } else { + // If this is not the last chunk of a chunked SetCell, then this must be the last mod + // of the current response, and we're expecting the rest of the chunked cells in the + // following ReadChangeStream response. + validate( + index == dataChange.getChunksCount() - 1, + "AWAITING_CELL_VALUE: Current mod is a chunked SetCell " + + "but not the last chunk, but it's not the last mod of the current response."); + return AWAITING_CELL_VALUE; + } + } + // Case 2: Current SetCell is not chunked. + builder.finishCell(); + return checkAndFinishMutationIfNeeded(dataChange, index + 1); + } + }; + + /** + * A state that represents a completed change stream record. It prevents new change stream records + * from being read until the current one has been consumed. The caller is supposed to consume the + * change stream record by calling {@link ChangeStreamStateMachine#consumeChangeStreamRecord()} + * which will reset the state to {@link ChangeStreamStateMachine#AWAITING_NEW_STREAM_RECORD}. + */ + private final State AWAITING_STREAM_RECORD_CONSUME = + new State() { + @Override + State handleHeartbeat(ReadChangeStreamResponse.Heartbeat heartbeat) { + throw new IllegalStateException( + "AWAITING_STREAM_RECORD_CONSUME: Skipping completed change stream record."); + } + + @Override + State handleCloseStream(ReadChangeStreamResponse.CloseStream closeStream) { + throw new IllegalStateException( + "AWAITING_STREAM_RECORD_CONSUME: Skipping completed change stream record."); + } + + @Override + State handleMod(ReadChangeStreamResponse.DataChange dataChange, int index) { + throw new IllegalStateException( + "AWAITING_STREAM_RECORD_CONSUME: Skipping completed change stream record."); + } + }; + + /** + * Check if we should continue handling mods in the current DataChange or wrap up. 
There are 3 + * cases: + * + *
    + *
  • 1) index < dataChange.getChunksCount() -> continue to handle the next mod. + *
  • 2_1) index == dataChange.getChunksCount() && dataChange.done == true -> current change + * stream mutation is complete. Wrap it up and return {@link + * ChangeStreamStateMachine#AWAITING_STREAM_RECORD_CONSUME}. + *
  • 2_2) index == dataChange.getChunksCount() && dataChange.done != true -> current change + * stream mutation isn't complete. Return {@link ChangeStreamStateMachine#AWAITING_NEW_MOD} + * to wait for more mods in the next ReadChangeStreamResponse. + *
+ */ + private State checkAndFinishMutationIfNeeded( + ReadChangeStreamResponse.DataChange dataChange, int index) { + validate( + 0 <= index && index <= dataChange.getChunksCount(), + "checkAndFinishMutationIfNeeded: index out of bound."); + // Case 1): Handle the next mod. + if (index < dataChange.getChunksCount()) { + return AWAITING_NEW_MOD.handleMod(dataChange, index); + } + // If we reach here, it means that all the mods in this DataChange have been handled. We should + // finish up the logical mutation or wait for more mods in the next ReadChangeStreamResponse, + // depending on whether the current response is the last response for the logical mutation. + if (dataChange.getDone()) { + // Case 2_1): Current change stream mutation is complete. + validate(!dataChange.getToken().isEmpty(), "Last data change missing token"); + validate(dataChange.hasLowWatermark(), "Last data change missing lowWatermark"); + completeChangeStreamRecord = + builder.finishChangeStreamMutation(dataChange.getToken(), dataChange.getLowWatermark()); + return AWAITING_STREAM_RECORD_CONSUME; + } + // Case 2_2): The current DataChange itself is chunked, so wait for the next + // ReadChangeStreamResponse. Note that we should wait for the new mods instead + // of for the new change stream record since the current record hasn't finished yet. + return AWAITING_NEW_MOD; + } + + private void validate(boolean condition, String message) { + if (!condition) { + throw new ChangeStreamStateMachine.InvalidInputException( + message + + ". 
numHeartbeats: " + + numHeartbeats + + ", numCloseStreams: " + + numCloseStreams + + ", numDataChanges: " + + numDataChanges + + ", numNonCellMods: " + + numNonCellMods + + ", numCellChunks: " + + numCellChunks + + ", actualTotalSizeOfChunkedSetCell: " + + actualTotalSizeOfChunkedSetCell); + } + } + + static class InvalidInputException extends RuntimeException { + InvalidInputException(String message) { + super(message); + } + } +} diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutationTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutationTest.java index 938213fb36..a14fe001cd 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutationTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutationTest.java @@ -87,15 +87,7 @@ public void userInitiatedMutationTest() throws IOException, ClassNotFoundExcepti oos.close(); ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray())); ChangeStreamMutation actual = (ChangeStreamMutation) ois.readObject(); - Assert.assertEquals(actual.getRowKey(), changeStreamMutation.getRowKey()); - Assert.assertEquals(actual.getType(), changeStreamMutation.getType()); - Assert.assertEquals(actual.getSourceClusterId(), changeStreamMutation.getSourceClusterId()); - Assert.assertEquals(actual.getCommitTimestamp(), changeStreamMutation.getCommitTimestamp()); - Assert.assertEquals(actual.getTieBreaker(), changeStreamMutation.getTieBreaker()); - Assert.assertEquals(actual.getToken(), changeStreamMutation.getToken()); - Assert.assertEquals(actual.getLowWatermark(), changeStreamMutation.getLowWatermark()); - assertThat(actual.toRowMutation(TABLE_ID).toProto(REQUEST_CONTEXT)) - .isEqualTo(changeStreamMutation.toRowMutation(TABLE_ID).toProto(REQUEST_CONTEXT)); + Assert.assertEquals(actual, changeStreamMutation); 
} @Test @@ -138,15 +130,7 @@ public void gcMutationTest() throws IOException, ClassNotFoundException { oos.close(); ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray())); ChangeStreamMutation actual = (ChangeStreamMutation) ois.readObject(); - Assert.assertEquals(actual.getRowKey(), changeStreamMutation.getRowKey()); - Assert.assertEquals(actual.getType(), changeStreamMutation.getType()); - Assert.assertEquals(actual.getSourceClusterId(), changeStreamMutation.getSourceClusterId()); - Assert.assertEquals(actual.getCommitTimestamp(), changeStreamMutation.getCommitTimestamp()); - Assert.assertEquals(actual.getTieBreaker(), changeStreamMutation.getTieBreaker()); - Assert.assertEquals(actual.getToken(), changeStreamMutation.getToken()); - Assert.assertEquals(actual.getLowWatermark(), changeStreamMutation.getLowWatermark()); - assertThat(actual.toRowMutation(TABLE_ID).toProto(REQUEST_CONTEXT)) - .isEqualTo(changeStreamMutation.toRowMutation(TABLE_ID).toProto(REQUEST_CONTEXT)); + Assert.assertEquals(actual, changeStreamMutation); } @Test diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/DefaultChangeStreamRecordAdapterTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/DefaultChangeStreamRecordAdapterTest.java new file mode 100644 index 0000000000..e29b914ffc --- /dev/null +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/DefaultChangeStreamRecordAdapterTest.java @@ -0,0 +1,446 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.models; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.bigtable.v2.Mutation; +import com.google.bigtable.v2.ReadChangeStreamResponse; +import com.google.bigtable.v2.StreamContinuationToken; +import com.google.bigtable.v2.TimestampRange; +import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecordAdapter.ChangeStreamRecordBuilder; +import com.google.protobuf.ByteString; +import com.google.protobuf.Timestamp; +import com.google.rpc.Status; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class DefaultChangeStreamRecordAdapterTest { + + private final DefaultChangeStreamRecordAdapter adapter = new DefaultChangeStreamRecordAdapter(); + private ChangeStreamRecordBuilder changeStreamRecordBuilder; + + @Rule public ExpectedException expect = ExpectedException.none(); + + @Before + public void setUp() { + changeStreamRecordBuilder = adapter.createChangeStreamRecordBuilder(); + } + + @Test + public void isHeartbeatTest() { + ChangeStreamRecord heartbeatRecord = + Heartbeat.fromProto(ReadChangeStreamResponse.Heartbeat.getDefaultInstance()); + ChangeStreamRecord closeStreamRecord = + CloseStream.fromProto(ReadChangeStreamResponse.CloseStream.getDefaultInstance()); + ChangeStreamRecord changeStreamMutationRecord = + ChangeStreamMutation.createGcMutation( + 
ByteString.copyFromUtf8("key"), Timestamp.getDefaultInstance(), 0) + .setToken("token") + .setLowWatermark(Timestamp.getDefaultInstance()) + .build(); + Assert.assertTrue(adapter.isHeartbeat(heartbeatRecord)); + Assert.assertFalse(adapter.isHeartbeat(closeStreamRecord)); + Assert.assertFalse(adapter.isHeartbeat(changeStreamMutationRecord)); + } + + @Test + public void getTokenFromHeartbeatTest() { + ChangeStreamRecord heartbeatRecord = + Heartbeat.fromProto( + ReadChangeStreamResponse.Heartbeat.newBuilder() + .setLowWatermark(Timestamp.newBuilder().setSeconds(1000).build()) + .setContinuationToken( + StreamContinuationToken.newBuilder().setToken("heartbeat-token").build()) + .build()); + Assert.assertEquals(adapter.getTokenFromHeartbeat(heartbeatRecord), "heartbeat-token"); + } + + @Test(expected = IllegalArgumentException.class) + public void getTokenFromHeartbeatInvalidTypeTest() { + ChangeStreamRecord closeStreamRecord = + CloseStream.fromProto(ReadChangeStreamResponse.CloseStream.getDefaultInstance()); + adapter.getTokenFromHeartbeat(closeStreamRecord); + expect.expectMessage("record is not a Heartbeat."); + } + + @Test + public void isChangeStreamMutationTest() { + ChangeStreamRecord heartbeatRecord = + Heartbeat.fromProto(ReadChangeStreamResponse.Heartbeat.getDefaultInstance()); + ChangeStreamRecord closeStreamRecord = + CloseStream.fromProto(ReadChangeStreamResponse.CloseStream.getDefaultInstance()); + ChangeStreamRecord changeStreamMutationRecord = + ChangeStreamMutation.createGcMutation( + ByteString.copyFromUtf8("key"), Timestamp.getDefaultInstance(), 0) + .setToken("token") + .setLowWatermark(Timestamp.getDefaultInstance()) + .build(); + Assert.assertFalse(adapter.isChangeStreamMutation(heartbeatRecord)); + Assert.assertFalse(adapter.isChangeStreamMutation(closeStreamRecord)); + Assert.assertTrue(adapter.isChangeStreamMutation(changeStreamMutationRecord)); + } + + @Test + public void getTokenFromChangeStreamMutationTest() { + ChangeStreamRecord 
changeStreamMutationRecord = + ChangeStreamMutation.createGcMutation( + ByteString.copyFromUtf8("key"), Timestamp.getDefaultInstance(), 0) + .setToken("change-stream-mutation-token") + .setLowWatermark(Timestamp.getDefaultInstance()) + .build(); + Assert.assertEquals( + adapter.getTokenFromChangeStreamMutation(changeStreamMutationRecord), + "change-stream-mutation-token"); + } + + @Test(expected = IllegalArgumentException.class) + public void getTokenFromChangeStreamMutationInvalidTypeTest() { + ChangeStreamRecord closeStreamRecord = + CloseStream.fromProto(ReadChangeStreamResponse.CloseStream.getDefaultInstance()); + adapter.getTokenFromChangeStreamMutation(closeStreamRecord); + expect.expectMessage("record is not a ChangeStreamMutation."); + } + + @Test + public void heartbeatTest() { + ReadChangeStreamResponse.Heartbeat expectedHeartbeat = + ReadChangeStreamResponse.Heartbeat.newBuilder() + .setLowWatermark(Timestamp.newBuilder().setSeconds(1000).build()) + .setContinuationToken( + StreamContinuationToken.newBuilder().setToken("random-token").build()) + .build(); + assertThat(changeStreamRecordBuilder.onHeartbeat(expectedHeartbeat)) + .isEqualTo(Heartbeat.fromProto(expectedHeartbeat)); + // Call again. + assertThat(changeStreamRecordBuilder.onHeartbeat(expectedHeartbeat)) + .isEqualTo(Heartbeat.fromProto(expectedHeartbeat)); + } + + @Test + public void closeStreamTest() { + ReadChangeStreamResponse.CloseStream expectedCloseStream = + ReadChangeStreamResponse.CloseStream.newBuilder() + .addContinuationTokens( + StreamContinuationToken.newBuilder().setToken("random-token").build()) + .setStatus(Status.newBuilder().setCode(0).build()) + .build(); + assertThat(changeStreamRecordBuilder.onCloseStream(expectedCloseStream)) + .isEqualTo(CloseStream.fromProto(expectedCloseStream)); + // Call again. 
+ assertThat(changeStreamRecordBuilder.onCloseStream(expectedCloseStream)) + .isEqualTo(CloseStream.fromProto(expectedCloseStream)); + } + + @Test(expected = IllegalArgumentException.class) + public void createHeartbeatWithExistingMutationShouldFailTest() { + changeStreamRecordBuilder.startGcMutation( + ByteString.copyFromUtf8("key"), Timestamp.getDefaultInstance(), 0); + changeStreamRecordBuilder.onHeartbeat(ReadChangeStreamResponse.Heartbeat.getDefaultInstance()); + } + + @Test(expected = IllegalArgumentException.class) + public void createCloseStreamWithExistingMutationShouldFailTest() { + changeStreamRecordBuilder.startGcMutation( + ByteString.copyFromUtf8("key"), Timestamp.getDefaultInstance(), 0); + changeStreamRecordBuilder.onCloseStream( + ReadChangeStreamResponse.CloseStream.getDefaultInstance()); + } + + @Test + public void singleDeleteFamilyTest() { + // Suppose this is the mod we get from the ReadChangeStreamResponse. + Mutation.DeleteFromFamily deleteFromFamily = + Mutation.DeleteFromFamily.newBuilder().setFamilyName("fake-family").build(); + Timestamp fakeCommitTimestamp = Timestamp.newBuilder().setSeconds(1000).build(); + Timestamp fakeLowWatermark = Timestamp.newBuilder().setSeconds(2000).build(); + + // Expected logical mutation in the change stream record. + ChangeStreamMutation expectedChangeStreamMutation = + ChangeStreamMutation.createUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0) + .deleteFamily("fake-family") + .setToken("fake-token") + .setLowWatermark(fakeLowWatermark) + .build(); + + // Create the ChangeStreamMutation through the ChangeStreamRecordBuilder. 
+ changeStreamRecordBuilder.startUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0); + changeStreamRecordBuilder.deleteFamily(deleteFromFamily.getFamilyName()); + assertThat(changeStreamRecordBuilder.finishChangeStreamMutation("fake-token", fakeLowWatermark)) + .isEqualTo(expectedChangeStreamMutation); + // Call again. + assertThat(changeStreamRecordBuilder.finishChangeStreamMutation("fake-token", fakeLowWatermark)) + .isEqualTo(expectedChangeStreamMutation); + } + + @Test + public void singleDeleteCellTest() { + // Suppose this is the mod we get from the ReadChangeStreamResponse. + Mutation.DeleteFromColumn deleteFromColumn = + Mutation.DeleteFromColumn.newBuilder() + .setFamilyName("fake-family") + .setColumnQualifier(ByteString.copyFromUtf8("fake-qualifier")) + .setTimeRange( + TimestampRange.newBuilder() + .setStartTimestampMicros(1000L) + .setEndTimestampMicros(2000L) + .build()) + .build(); + Timestamp fakeCommitTimestamp = Timestamp.newBuilder().setSeconds(1000).build(); + Timestamp fakeLowWatermark = Timestamp.newBuilder().setSeconds(2000).build(); + + // Expected logical mutation in the change stream record. + ChangeStreamMutation expectedChangeStreamMutation = + ChangeStreamMutation.createUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0) + .deleteCells( + "fake-family", + ByteString.copyFromUtf8("fake-qualifier"), + Range.TimestampRange.create(1000L, 2000L)) + .setToken("fake-token") + .setLowWatermark(fakeLowWatermark) + .build(); + + // Create the ChangeStreamMutation through the ChangeStreamRecordBuilder. 
+ changeStreamRecordBuilder.startUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0); + changeStreamRecordBuilder.deleteCells( + deleteFromColumn.getFamilyName(), + deleteFromColumn.getColumnQualifier(), + Range.TimestampRange.create( + deleteFromColumn.getTimeRange().getStartTimestampMicros(), + deleteFromColumn.getTimeRange().getEndTimestampMicros())); + assertThat(changeStreamRecordBuilder.finishChangeStreamMutation("fake-token", fakeLowWatermark)) + .isEqualTo(expectedChangeStreamMutation); + // Call again. + assertThat(changeStreamRecordBuilder.finishChangeStreamMutation("fake-token", fakeLowWatermark)) + .isEqualTo(expectedChangeStreamMutation); + } + + @Test + public void singleNonChunkedCellTest() { + // Expected logical mutation in the change stream record. + Timestamp fakeCommitTimestamp = Timestamp.newBuilder().setSeconds(1000).build(); + Timestamp fakeLowWatermark = Timestamp.newBuilder().setSeconds(2000).build(); + ChangeStreamMutation expectedChangeStreamMutation = + ChangeStreamMutation.createUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0) + .setCell( + "fake-family", + ByteString.copyFromUtf8("fake-qualifier"), + 100L, + ByteString.copyFromUtf8("fake-value")) + .setToken("fake-token") + .setLowWatermark(fakeLowWatermark) + .build(); + + // Create the ChangeStreamMutation through the ChangeStreamRecordBuilder. + // Suppose the SetCell is not chunked and the state machine calls `cellValue()` once. 
+ changeStreamRecordBuilder.startUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0); + changeStreamRecordBuilder.startCell( + "fake-family", ByteString.copyFromUtf8("fake-qualifier"), 100L); + changeStreamRecordBuilder.cellValue(ByteString.copyFromUtf8("fake-value")); + changeStreamRecordBuilder.finishCell(); + assertThat(changeStreamRecordBuilder.finishChangeStreamMutation("fake-token", fakeLowWatermark)) + .isEqualTo(expectedChangeStreamMutation); + // Call again. + assertThat(changeStreamRecordBuilder.finishChangeStreamMutation("fake-token", fakeLowWatermark)) + .isEqualTo(expectedChangeStreamMutation); + } + + @Test + public void singleChunkedCellTest() { + // Expected logical mutation in the change stream record. + Timestamp fakeCommitTimestamp = Timestamp.newBuilder().setSeconds(1000).build(); + Timestamp fakeLowWatermark = Timestamp.newBuilder().setSeconds(2000).build(); + ChangeStreamMutation expectedChangeStreamMutation = + ChangeStreamMutation.createUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0) + .setCell( + "fake-family", + ByteString.copyFromUtf8("fake-qualifier"), + 100L, + ByteString.copyFromUtf8("fake-value1-value2")) + .setToken("fake-token") + .setLowWatermark(fakeLowWatermark) + .build(); + + // Create the ChangeStreamMutation through the ChangeStreamRecordBuilder. + // Suppose the SetCell is chunked into two pieces and the state machine calls `cellValue()` + // twice. 
+ changeStreamRecordBuilder.startUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0); + changeStreamRecordBuilder.startCell( + "fake-family", ByteString.copyFromUtf8("fake-qualifier"), 100L); + changeStreamRecordBuilder.cellValue(ByteString.copyFromUtf8("fake-value1")); + changeStreamRecordBuilder.cellValue(ByteString.copyFromUtf8("-value2")); + changeStreamRecordBuilder.finishCell(); + assertThat(changeStreamRecordBuilder.finishChangeStreamMutation("fake-token", fakeLowWatermark)) + .isEqualTo(expectedChangeStreamMutation); + // Call again. + assertThat(changeStreamRecordBuilder.finishChangeStreamMutation("fake-token", fakeLowWatermark)) + .isEqualTo(expectedChangeStreamMutation); + } + + @Test + public void multipleChunkedCellsTest() { + // Expected logical mutation in the change stream record. + Timestamp fakeCommitTimestamp = Timestamp.newBuilder().setSeconds(1000).build(); + Timestamp fakeLowWatermark = Timestamp.newBuilder().setSeconds(2000).build(); + ChangeStreamMutation.Builder expectedChangeStreamMutationBuilder = + ChangeStreamMutation.createUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0); + for (int i = 0; i < 10; ++i) { + expectedChangeStreamMutationBuilder.setCell( + "fake-family", + ByteString.copyFromUtf8("fake-qualifier"), + 100L, + ByteString.copyFromUtf8(i + "-fake-value1-value2-value3")); + } + expectedChangeStreamMutationBuilder.setToken("fake-token").setLowWatermark(fakeLowWatermark); + + // Create the ChangeStreamMutation through the ChangeStreamRecordBuilder. 
+ changeStreamRecordBuilder.startUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0); + for (int i = 0; i < 10; ++i) { + changeStreamRecordBuilder.startCell( + "fake-family", ByteString.copyFromUtf8("fake-qualifier"), 100L); + changeStreamRecordBuilder.cellValue(ByteString.copyFromUtf8(i + "-fake-value1")); + changeStreamRecordBuilder.cellValue(ByteString.copyFromUtf8("-value2")); + changeStreamRecordBuilder.cellValue(ByteString.copyFromUtf8("-value3")); + changeStreamRecordBuilder.finishCell(); + } + // Check that they're the same. + assertThat(changeStreamRecordBuilder.finishChangeStreamMutation("fake-token", fakeLowWatermark)) + .isEqualTo(expectedChangeStreamMutationBuilder.build()); + // Call again. + assertThat(changeStreamRecordBuilder.finishChangeStreamMutation("fake-token", fakeLowWatermark)) + .isEqualTo(expectedChangeStreamMutationBuilder.build()); + } + + @Test + public void multipleDifferentModsTest() { + // Expected logical mutation in the change stream record, which contains one DeleteFromFamily, + // one non-chunked cell, and one chunked cell. + Timestamp fakeCommitTimestamp = Timestamp.newBuilder().setSeconds(1000).build(); + Timestamp fakeLowWatermark = Timestamp.newBuilder().setSeconds(2000).build(); + ChangeStreamMutation.Builder expectedChangeStreamMutationBuilder = + ChangeStreamMutation.createUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0) + .deleteFamily("fake-family") + .setCell( + "fake-family", + ByteString.copyFromUtf8("fake-qualifier"), + 100L, + ByteString.copyFromUtf8("non-chunked-value")) + .setCell( + "fake-family", + ByteString.copyFromUtf8("fake-qualifier"), + 100L, + ByteString.copyFromUtf8("chunked-value")) + .setToken("fake-token") + .setLowWatermark(fakeLowWatermark); + + // Create the ChangeStreamMutation through the ChangeStreamRecordBuilder. 
+ changeStreamRecordBuilder.startUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0); + changeStreamRecordBuilder.deleteFamily("fake-family"); + // Add non-chunked cell. + changeStreamRecordBuilder.startCell( + "fake-family", ByteString.copyFromUtf8("fake-qualifier"), 100L); + changeStreamRecordBuilder.cellValue(ByteString.copyFromUtf8("non-chunked-value")); + changeStreamRecordBuilder.finishCell(); + // Add chunked cell. + changeStreamRecordBuilder.startCell( + "fake-family", ByteString.copyFromUtf8("fake-qualifier"), 100L); + changeStreamRecordBuilder.cellValue(ByteString.copyFromUtf8("chunked")); + changeStreamRecordBuilder.cellValue(ByteString.copyFromUtf8("-value")); + changeStreamRecordBuilder.finishCell(); + assertThat(changeStreamRecordBuilder.finishChangeStreamMutation("fake-token", fakeLowWatermark)) + .isEqualTo(expectedChangeStreamMutationBuilder.build()); + } + + @Test + public void resetTest() { + // Build a Heartbeat. + ReadChangeStreamResponse.Heartbeat expectedHeartbeat = + ReadChangeStreamResponse.Heartbeat.getDefaultInstance(); + assertThat(changeStreamRecordBuilder.onHeartbeat(expectedHeartbeat)) + .isEqualTo(Heartbeat.fromProto(expectedHeartbeat)); + + // Reset and build a CloseStream. + changeStreamRecordBuilder.reset(); + ReadChangeStreamResponse.CloseStream expectedCloseStream = + ReadChangeStreamResponse.CloseStream.getDefaultInstance(); + assertThat(changeStreamRecordBuilder.onCloseStream(expectedCloseStream)) + .isEqualTo(CloseStream.fromProto(expectedCloseStream)); + + // Reset and build a DeleteFamily. 
+ changeStreamRecordBuilder.reset(); + Mutation deleteFromFamily = + Mutation.newBuilder() + .setDeleteFromFamily( + Mutation.DeleteFromFamily.newBuilder().setFamilyName("fake-family").build()) + .build(); + Timestamp fakeCommitTimestamp = Timestamp.newBuilder().setSeconds(1000).build(); + Timestamp fakeLowWatermark = Timestamp.newBuilder().setSeconds(2000).build(); + ChangeStreamMutation expectedChangeStreamMutation = + ChangeStreamMutation.createUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0) + .deleteFamily("fake-family") + .setToken("fake-token") + .setLowWatermark(fakeLowWatermark) + .build(); + changeStreamRecordBuilder.startUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0); + changeStreamRecordBuilder.deleteFamily(deleteFromFamily.getDeleteFromFamily().getFamilyName()); + assertThat(changeStreamRecordBuilder.finishChangeStreamMutation("fake-token", fakeLowWatermark)) + .isEqualTo(expectedChangeStreamMutation); + + // Reset and build a cell. 
+ changeStreamRecordBuilder.reset(); + expectedChangeStreamMutation = + ChangeStreamMutation.createUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0) + .setCell( + "fake-family", + ByteString.copyFromUtf8("fake-qualifier"), + 100L, + ByteString.copyFromUtf8("fake-value1-value2")) + .setToken("fake-token") + .setLowWatermark(fakeLowWatermark) + .build(); + + changeStreamRecordBuilder.startUserMutation( + ByteString.copyFromUtf8("key"), "fake-source-cluster-id", fakeCommitTimestamp, 0); + changeStreamRecordBuilder.startCell( + "fake-family", ByteString.copyFromUtf8("fake-qualifier"), 100L); + changeStreamRecordBuilder.cellValue(ByteString.copyFromUtf8("fake-value1")); + changeStreamRecordBuilder.cellValue(ByteString.copyFromUtf8("-value2")); + changeStreamRecordBuilder.finishCell(); + assertThat(changeStreamRecordBuilder.finishChangeStreamMutation("fake-token", fakeLowWatermark)) + .isEqualTo(expectedChangeStreamMutation); + } +} diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamStateMachineTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamStateMachineTest.java new file mode 100644 index 0000000000..d86df91c35 --- /dev/null +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamStateMachineTest.java @@ -0,0 +1,61 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.changestream; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.bigtable.v2.ReadChangeStreamResponse; +import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecord; +import com.google.cloud.bigtable.data.v2.models.DefaultChangeStreamRecordAdapter; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ChangeStreamStateMachineTest { + ChangeStreamStateMachine changeStreamStateMachine; + + @Before + public void setUp() throws Exception { + changeStreamStateMachine = + new ChangeStreamStateMachine<>( + new DefaultChangeStreamRecordAdapter().createChangeStreamRecordBuilder()); + } + + @Test + public void testErrorHandlingStats() { + ReadChangeStreamResponse.DataChange dataChange = + ReadChangeStreamResponse.DataChange.newBuilder().build(); + + ChangeStreamStateMachine.InvalidInputException actualError = null; + try { + changeStreamStateMachine.handleDataChange(dataChange); + } catch (ChangeStreamStateMachine.InvalidInputException e) { + actualError = e; + } + + assertThat(actualError) + .hasMessageThat() + .containsMatch("AWAITING_NEW_STREAM_RECORD: First data change missing rowKey"); + assertThat(actualError).hasMessageThat().contains("numHeartbeats: 0"); + assertThat(actualError).hasMessageThat().contains("numCloseStreams: 0"); + assertThat(actualError).hasMessageThat().contains("numDataChanges: 1"); + assertThat(actualError).hasMessageThat().contains("numNonCellMods: 0"); + assertThat(actualError).hasMessageThat().contains("numCellChunks: 0"); + assertThat(actualError).hasMessageThat().contains("actualTotalSizeOfChunkedSetCell: 0"); + } +} From c612cf698de6e3b7f639034d262769083d4715cd Mon Sep 17 00:00:00 2001 From: tengzhonger 
<109308630+tengzhonger@users.noreply.github.com> Date: Mon, 8 Aug 2022 14:19:31 -0400 Subject: [PATCH 06/13] feat: Add readChangeStream callables (#1338) * feat: Add readChangeStream callables The merging logic is tested in: ReadChangeStreamMergingCallableTest ReadChangeStreamMergingAcceptanceTest * fix: Fix styles * fix: Make some methods package private Remove all the mutation related tests in ChangeStreamRecordMergingCallableTest. Just use the ReadChangeStreamMergingAcceptanceTest. * fix: Address comments * fix: Address some comments * fix: Add test for [{SC_chunk1}, {SC_chunk2}, {SC_chunk3}]->ChangeStreamMutation{SC} * fix: Update the changestream.json file for better description * fix: Update code comments to make style-check happy * fix: Add sanity check for ChunkedValueSize. Add comments to explain why we can put the AcceptanceTest in the google-cloud-bigtable repo * fix: Fix comment Co-authored-by: Teng Zhong --- google-cloud-bigtable/pom.xml | 33 + .../bigtable/data/v2/BigtableDataClient.java | 155 ++ .../data/v2/models/ChangeStreamMutation.java | 9 +- .../bigtable/data/v2/models/SetCell.java | 5 +- .../data/v2/stub/EnhancedBigtableStub.java | 100 ++ .../v2/stub/EnhancedBigtableStubSettings.java | 42 +- .../ChangeStreamRecordMerger.java | 112 ++ .../ChangeStreamRecordMergingCallable.java | 63 + .../ChangeStreamStateMachine.java | 25 +- .../ReadChangeStreamUserCallable.java | 51 + .../src/main/proto/changestream_tests.proto | 63 + .../src/main/resources/changestream.json | 1379 +++++++++++++++++ .../data/v2/BigtableDataClientTests.java | 35 + .../EnhancedBigtableStubSettingsTest.java | 1 + ...ChangeStreamRecordMergingCallableTest.java | 124 ++ ...ReadChangeStreamMergingAcceptanceTest.java | 263 ++++ .../ReadChangeStreamUserCallableTest.java | 48 + 17 files changed, 2496 insertions(+), 12 deletions(-) create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamRecordMerger.java create mode 100644 
google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamRecordMergingCallable.java create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamUserCallable.java create mode 100644 google-cloud-bigtable/src/main/proto/changestream_tests.proto create mode 100644 google-cloud-bigtable/src/main/resources/changestream.json create mode 100644 google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamRecordMergingCallableTest.java create mode 100644 google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamMergingAcceptanceTest.java create mode 100644 google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamUserCallableTest.java diff --git a/google-cloud-bigtable/pom.xml b/google-cloud-bigtable/pom.xml index 8cd922d5f4..1ad646c868 100644 --- a/google-cloud-bigtable/pom.xml +++ b/google-cloud-bigtable/pom.xml @@ -36,6 +36,10 @@ batch-bigtable.googleapis.com:443 + + 1.44.0 + 3.19.3 + ${protobuf.version} @@ -545,7 +549,36 @@ + + + kr.motd.maven + os-maven-plugin + 1.6.0 + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + 0.6.1 + + + + compile + compile-custom + + + + + + com.google.protobuf:protoc:${protoc.version}:exe:${os.detected.classifier} + + grpc-java + + io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier} + + + org.codehaus.mojo build-helper-maven-plugin diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java index 38bc4dc811..2de6e9b94a 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java @@ -31,11 
+31,13 @@ import com.google.api.gax.rpc.UnaryCallable; import com.google.bigtable.v2.RowRange; import com.google.cloud.bigtable.data.v2.models.BulkMutation; +import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecord; import com.google.cloud.bigtable.data.v2.models.ConditionalRowMutation; import com.google.cloud.bigtable.data.v2.models.Filters; import com.google.cloud.bigtable.data.v2.models.Filters.Filter; import com.google.cloud.bigtable.data.v2.models.KeyOffset; import com.google.cloud.bigtable.data.v2.models.Query; +import com.google.cloud.bigtable.data.v2.models.ReadChangeStreamQuery; import com.google.cloud.bigtable.data.v2.models.ReadModifyWriteRow; import com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.models.RowAdapter; @@ -1627,6 +1629,159 @@ public ServerStreamingCallable listChangeStreamPartitionsCalla return stub.listChangeStreamPartitionsCallable(); } + /** + * Convenience method for synchronously streaming the results of a {@link ReadChangeStreamQuery}. + * The returned ServerStream instance is not threadsafe, it can only be used from single thread. + * + *

Sample code: + * + *

{@code
+   * try (BigtableDataClient bigtableDataClient = BigtableDataClient.create("[PROJECT]", "[INSTANCE]")) {
+   *   String tableId = "[TABLE]";
+   *
+   *   ReadChangeStreamQuery query = ReadChangeStreamQuery.create(tableId)
+   *          .streamPartition("START_KEY", "END_KEY")
+   *          .startTime(Timestamp.newBuilder().setSeconds(100).build());
+   *
+   *   try {
+   *     ServerStream stream = bigtableDataClient.readChangeStream(query);
+   *     int count = 0;
+   *
+   *     // Iterator style
+   *     for (ChangeStreamRecord record : stream) {
+   *       if (++count > 10) {
+   *         stream.cancel();
+   *         break;
+   *       }
+   *       // Do something with the change stream record.
+   *     }
+   *   } catch (NotFoundException e) {
+   *     System.out.println("Tried to read a non-existent table");
+   *   } catch (RuntimeException e) {
+   *     e.printStackTrace();
+   *   }
+   * }
+   * }
+ * + * @see ServerStreamingCallable For call styles. + * @see ReadChangeStreamQuery For query options. + */ + @InternalApi("Used in Changestream beam pipeline.") + public ServerStream readChangeStream(ReadChangeStreamQuery query) { + return readChangeStreamCallable().call(query); + } + + /** + * Convenience method for asynchronously streaming the results of a {@link ReadChangeStreamQuery}. + * + *

Sample code: + * + *

{@code
+   * try (BigtableDataClient bigtableDataClient = BigtableDataClient.create("[PROJECT]", "[INSTANCE]")) {
+   *   String tableId = "[TABLE]";
+   *
+   *   ReadChangeStreamQuery query = ReadChangeStreamQuery.create(tableId)
+   *          .streamPartition("START_KEY", "END_KEY")
+   *          .startTime(Timestamp.newBuilder().setSeconds(100).build());
+   *
+   *   bigtableDataClient.readChangeStreamAsync(query, new ResponseObserver() {
+   *     StreamController controller;
+   *     int count = 0;
+   *
+   *     public void onStart(StreamController controller) {
+   *       this.controller = controller;
+   *     }
+   *     public void onResponse(ChangeStreamRecord record) {
+   *       if (++count > 10) {
+   *         controller.cancel();
+   *         return;
+   *       }
+   *       // Do something with the change stream record.
+   *     }
+   *     public void onError(Throwable t) {
+   *       if (t instanceof NotFoundException) {
+   *         System.out.println("Tried to read a non-existent table");
+   *       } else {
+   *         t.printStackTrace();
+   *       }
+   *     }
+   *     public void onComplete() {
+   *       // Handle stream completion
+   *     }
+   *   });
+   * }
+   * }
+ */ + @InternalApi("Used in Changestream beam pipeline.") + public void readChangeStreamAsync( + ReadChangeStreamQuery query, ResponseObserver observer) { + readChangeStreamCallable().call(query, observer); + } + + /** + * Streams back the results of the query. The returned callable object allows for customization of + * api invocation. + * + *

Sample code: + * + *

{@code
+   * try (BigtableDataClient bigtableDataClient = BigtableDataClient.create("[PROJECT]", "[INSTANCE]")) {
+   *   String tableId = "[TABLE]";
+   *
+   *   ReadChangeStreamQuery query = ReadChangeStreamQuery.create(tableId)
+   *          .streamPartition("START_KEY", "END_KEY")
+   *          .startTime(Timestamp.newBuilder().setSeconds(100).build());
+   *
+   *   // Iterator style
+   *   try {
+   *     for(ChangeStreamRecord record : bigtableDataClient.readChangeStreamCallable().call(query)) {
+   *       // Do something with record
+   *     }
+   *   } catch (NotFoundException e) {
+   *     System.out.println("Tried to read a non-existent table");
+   *   } catch (RuntimeException e) {
+   *     e.printStackTrace();
+   *   }
+   *
+   *   // Sync style
+   *   try {
+   *     List records = bigtableDataClient.readChangeStreamCallable().all().call(query);
+   *   } catch (NotFoundException e) {
+   *     System.out.println("Tried to read a non-existent table");
+   *   } catch (RuntimeException e) {
+   *     e.printStackTrace();
+   *   }
+   *
+   *   // Point look up
+   *   ApiFuture recordFuture =
+   *     bigtableDataClient.readChangeStreamCallable().first().futureCall(query);
+   *
+   *   ApiFutures.addCallback(recordFuture, new ApiFutureCallback() {
+   *     public void onFailure(Throwable t) {
+   *       if (t instanceof NotFoundException) {
+   *         System.out.println("Tried to read a non-existent table");
+   *       } else {
+   *         t.printStackTrace();
+   *       }
+   *     }
+   *     public void onSuccess(ChangeStreamRecord result) {
+   *       System.out.println("Got record: " + result);
+   *     }
+   *   }, MoreExecutors.directExecutor());
+   *
+   *   // etc
+   * }
+   * }
+ * + * @see ServerStreamingCallable For call styles. + * @see ReadChangeStreamQuery For query options. + */ + @InternalApi("Used in Changestream beam pipeline.") + public ServerStreamingCallable + readChangeStreamCallable() { + return stub.readChangeStreamCallable(); + } + /** Close the clients and releases all associated resources. */ @Override public void close() { diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java index 10571ecd1f..db0891fda1 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java @@ -17,6 +17,7 @@ import com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type; import com.google.cloud.bigtable.data.v2.models.Range.TimestampRange; +import com.google.cloud.bigtable.data.v2.stub.changestream.ChangeStreamRecordMerger; import com.google.common.base.MoreObjects; import com.google.common.base.Objects; import com.google.common.base.Preconditions; @@ -33,7 +34,7 @@ /** * A ChangeStreamMutation represents a list of mods(represented by List<{@link Entry}>) targeted at - * a single row, which is concatenated by (TODO:ChangeStreamRecordMerger). It represents a logical + * a single row, which is concatenated by {@link ChangeStreamRecordMerger}. It represents a logical * row mutation and can be converted to the original write request(i.e. {@link RowMutation} or * {@link RowMutationEntry}. 
* @@ -245,17 +246,17 @@ Builder deleteFamily(@Nonnull String familyName) { return this; } - public Builder setToken(@Nonnull String token) { + Builder setToken(@Nonnull String token) { this.token = token; return this; } - public Builder setLowWatermark(@Nonnull Timestamp lowWatermark) { + Builder setLowWatermark(@Nonnull Timestamp lowWatermark) { this.lowWatermark = lowWatermark; return this; } - public ChangeStreamMutation build() { + ChangeStreamMutation build() { Preconditions.checkArgument( token != null && lowWatermark != null, "ChangeStreamMutation must have a continuation token and low watermark."); diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/SetCell.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/SetCell.java index a157b5cd73..0c1add67f8 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/SetCell.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/SetCell.java @@ -16,13 +16,14 @@ package com.google.cloud.bigtable.data.v2.models; import com.google.auto.value.AutoValue; +import com.google.cloud.bigtable.data.v2.stub.changestream.ChangeStreamRecordMerger; import com.google.protobuf.ByteString; import java.io.Serializable; import javax.annotation.Nonnull; /** - * Representation of a SetCell mod in a data change, whose value is concatenated by - * (TODO:ChangeStreamRecordMerger) in case of SetCell value chunking. + * Representation of a SetCell mod in a data change, whose value is concatenated by {@link + * ChangeStreamRecordMerger} in case of SetCell value chunking. 
*/ @AutoValue public abstract class SetCell implements Entry, Serializable { diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java index 7d2cd85b65..7872b1e07e 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java @@ -53,6 +53,8 @@ import com.google.bigtable.v2.MutateRowResponse; import com.google.bigtable.v2.MutateRowsRequest; import com.google.bigtable.v2.MutateRowsResponse; +import com.google.bigtable.v2.ReadChangeStreamRequest; +import com.google.bigtable.v2.ReadChangeStreamResponse; import com.google.bigtable.v2.ReadModifyWriteRowRequest; import com.google.bigtable.v2.ReadModifyWriteRowResponse; import com.google.bigtable.v2.ReadRowsRequest; @@ -64,16 +66,23 @@ import com.google.cloud.bigtable.data.v2.internal.JwtCredentialsWithAudience; import com.google.cloud.bigtable.data.v2.internal.RequestContext; import com.google.cloud.bigtable.data.v2.models.BulkMutation; +import com.google.cloud.bigtable.data.v2.models.ChangeStreamMutation; +import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecord; +import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecordAdapter; import com.google.cloud.bigtable.data.v2.models.ConditionalRowMutation; +import com.google.cloud.bigtable.data.v2.models.DefaultChangeStreamRecordAdapter; import com.google.cloud.bigtable.data.v2.models.DefaultRowAdapter; import com.google.cloud.bigtable.data.v2.models.KeyOffset; import com.google.cloud.bigtable.data.v2.models.Query; +import com.google.cloud.bigtable.data.v2.models.ReadChangeStreamQuery; import com.google.cloud.bigtable.data.v2.models.ReadModifyWriteRow; import com.google.cloud.bigtable.data.v2.models.Row; import 
com.google.cloud.bigtable.data.v2.models.RowAdapter; import com.google.cloud.bigtable.data.v2.models.RowMutation; import com.google.cloud.bigtable.data.v2.models.RowMutationEntry; +import com.google.cloud.bigtable.data.v2.stub.changestream.ChangeStreamRecordMergingCallable; import com.google.cloud.bigtable.data.v2.stub.changestream.ListChangeStreamPartitionsUserCallable; +import com.google.cloud.bigtable.data.v2.stub.changestream.ReadChangeStreamUserCallable; import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracerStreamingCallable; import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracerUnaryCallable; import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTracerFactory; @@ -147,6 +156,9 @@ public class EnhancedBigtableStub implements AutoCloseable { private final ServerStreamingCallable listChangeStreamPartitionsCallable; + private final ServerStreamingCallable + readChangeStreamCallable; + public static EnhancedBigtableStub create(EnhancedBigtableStubSettings settings) throws IOException { settings = finalizeSettings(settings, Tags.getTagger(), Stats.getStatsRecorder()); @@ -290,6 +302,8 @@ public EnhancedBigtableStub(EnhancedBigtableStubSettings settings, ClientContext checkAndMutateRowCallable = createCheckAndMutateRowCallable(); readModifyWriteRowCallable = createReadModifyWriteRowCallable(); listChangeStreamPartitionsCallable = createListChangeStreamPartitionsCallable(); + readChangeStreamCallable = + createReadChangeStreamCallable(new DefaultChangeStreamRecordAdapter()); } // @@ -874,6 +888,86 @@ public Map extract( return traced.withDefaultCallContext(clientContext.getDefaultCallContext()); } + /** + * Creates a callable chain to handle streaming ReadChangeStream RPCs. The chain will: + * + *
    + *
  • Convert a {@link ReadChangeStreamQuery} into a {@link ReadChangeStreamRequest} and + * dispatch the RPC. + *
  • Upon receiving the response stream, it will produce a stream of ChangeStreamRecordT. In + * case of mutations, it will merge the {@link ReadChangeStreamResponse.DataChange}s into + * {@link ChangeStreamMutation}. The actual change stream record implementation can be + * configured by the {@code changeStreamRecordAdapter} parameter. + *
  • TODO: Retry/resume on failure. + *
  • Add tracing & metrics. + *
+ */ + public + ServerStreamingCallable + createReadChangeStreamCallable( + ChangeStreamRecordAdapter changeStreamRecordAdapter) { + ServerStreamingCallable base = + GrpcRawCallableFactory.createServerStreamingCallable( + GrpcCallSettings.newBuilder() + .setMethodDescriptor(BigtableGrpc.getReadChangeStreamMethod()) + .setParamsExtractor( + new RequestParamsExtractor() { + @Override + public Map extract( + ReadChangeStreamRequest readChangeStreamRequest) { + return ImmutableMap.of( + "table_name", readChangeStreamRequest.getTableName(), + "app_profile_id", readChangeStreamRequest.getAppProfileId()); + } + }) + .build(), + settings.readChangeStreamSettings().getRetryableCodes()); + + ServerStreamingCallable withStatsHeaders = + new StatsHeadersServerStreamingCallable<>(base); + + // Sometimes ReadChangeStream connections are disconnected via an RST frame. This error is + // transient and should be treated similar to UNAVAILABLE. However, this exception has an + // INTERNAL error code which by default is not retryable. Convert the exception it can be + // retried in the client. + ServerStreamingCallable convertException = + new ConvertStreamExceptionCallable<>(withStatsHeaders); + + ServerStreamingCallable merging = + new ChangeStreamRecordMergingCallable<>(convertException, changeStreamRecordAdapter); + + // Copy idle timeout settings for watchdog. + ServerStreamingCallSettings innerSettings = + ServerStreamingCallSettings.newBuilder() + // TODO: setResumptionStrategy. + .setRetryableCodes(settings.readChangeStreamSettings().getRetryableCodes()) + .setRetrySettings(settings.readChangeStreamSettings().getRetrySettings()) + .setIdleTimeout(settings.readChangeStreamSettings().getIdleTimeout()) + .build(); + + ServerStreamingCallable watched = + Callables.watched(merging, innerSettings, clientContext); + + ServerStreamingCallable withBigtableTracer = + new BigtableTracerStreamingCallable<>(watched); + + // TODO: Add ReadChangeStreamRetryCompletedCallable. 
+ + ServerStreamingCallable readChangeStreamCallable = + Callables.retrying(withBigtableTracer, innerSettings, clientContext); + + ServerStreamingCallable + readChangeStreamUserCallable = + new ReadChangeStreamUserCallable<>(readChangeStreamCallable, requestContext); + + SpanName span = getSpanName("ReadChangeStream"); + ServerStreamingCallable traced = + new TracedServerStreamingCallable<>( + readChangeStreamUserCallable, clientContext.getTracerFactory(), span); + + return traced.withDefaultCallContext(clientContext.getDefaultCallContext()); + } + /** * Wraps a callable chain in a user presentable callable that will inject the default call context * and trace the call. @@ -935,6 +1029,12 @@ public UnaryCallable readModifyWriteRowCallable() { public ServerStreamingCallable listChangeStreamPartitionsCallable() { return listChangeStreamPartitionsCallable; } + + /** Returns a streaming read change stream callable. */ + public ServerStreamingCallable + readChangeStreamCallable() { + return readChangeStreamCallable; + } //
private SpanName getSpanName(String methodName) { diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java index 83f0445bc5..68769c9602 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java @@ -35,9 +35,11 @@ import com.google.auth.Credentials; import com.google.bigtable.v2.RowRange; import com.google.cloud.bigtable.Version; +import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecord; import com.google.cloud.bigtable.data.v2.models.ConditionalRowMutation; import com.google.cloud.bigtable.data.v2.models.KeyOffset; import com.google.cloud.bigtable.data.v2.models.Query; +import com.google.cloud.bigtable.data.v2.models.ReadChangeStreamQuery; import com.google.cloud.bigtable.data.v2.models.ReadModifyWriteRow; import com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.models.RowMutation; @@ -154,6 +156,26 @@ public class EnhancedBigtableStubSettings extends StubSettings READ_CHANGE_STREAM_RETRY_CODES = + ImmutableSet.builder().addAll(IDEMPOTENT_RETRY_CODES).add(Code.ABORTED).build(); + + private static final RetrySettings READ_CHANGE_STREAM_RETRY_SETTINGS = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(10)) + .setRetryDelayMultiplier(2.0) + .setMaxRetryDelay(Duration.ofMinutes(1)) + .setMaxAttempts(10) + .setJittered(true) + .setInitialRpcTimeout(Duration.ofMinutes(5)) + .setRpcTimeoutMultiplier(2.0) + .setMaxRpcTimeout(Duration.ofMinutes(5)) + .setTotalTimeout(Duration.ofHours(12)) + .build(); + /** * Scopes that are equivalent to JWT's audience. 
* @@ -190,8 +212,9 @@ public class EnhancedBigtableStubSettings extends StubSettings checkAndMutateRowSettings; private final UnaryCallSettings readModifyWriteRowSettings; - private final ServerStreamingCallSettings listChangeStreamPartitionsSettings; + private final ServerStreamingCallSettings + readChangeStreamSettings; private EnhancedBigtableStubSettings(Builder builder) { super(builder); @@ -228,6 +251,7 @@ private EnhancedBigtableStubSettings(Builder builder) { checkAndMutateRowSettings = builder.checkAndMutateRowSettings.build(); readModifyWriteRowSettings = builder.readModifyWriteRowSettings.build(); listChangeStreamPartitionsSettings = builder.listChangeStreamPartitionsSettings.build(); + readChangeStreamSettings = builder.readChangeStreamSettings.build(); } /** Create a new builder. */ @@ -515,6 +539,11 @@ public ServerStreamingCallSettings listChangeStreamPartitionsS return listChangeStreamPartitionsSettings; } + public ServerStreamingCallSettings + readChangeStreamSettings() { + return readChangeStreamSettings; + } + /** Returns a builder containing all the values of this settings class. */ public Builder toBuilder() { return new Builder(this); @@ -539,9 +568,10 @@ public static class Builder extends StubSettings.Builder checkAndMutateRowSettings; private final UnaryCallSettings.Builder readModifyWriteRowSettings; - private final ServerStreamingCallSettings.Builder listChangeStreamPartitionsSettings; + private final ServerStreamingCallSettings.Builder + readChangeStreamSettings; /** * Initializes a new Builder with sane defaults for all settings. 
@@ -659,6 +689,12 @@ private Builder() { .setRetryableCodes(LIST_CHANGE_STREAM_PARTITIONS_RETRY_CODES) .setRetrySettings(LIST_CHANGE_STREAM_PARTITIONS_RETRY_SETTINGS) .setIdleTimeout(Duration.ofMinutes(5)); + + readChangeStreamSettings = ServerStreamingCallSettings.newBuilder(); + readChangeStreamSettings + .setRetryableCodes(READ_CHANGE_STREAM_RETRY_CODES) + .setRetrySettings(READ_CHANGE_STREAM_RETRY_SETTINGS) + .setIdleTimeout(Duration.ofMinutes(5)); } private Builder(EnhancedBigtableStubSettings settings) { @@ -680,6 +716,7 @@ private Builder(EnhancedBigtableStubSettings settings) { checkAndMutateRowSettings = settings.checkAndMutateRowSettings.toBuilder(); readModifyWriteRowSettings = settings.readModifyWriteRowSettings.toBuilder(); listChangeStreamPartitionsSettings = settings.listChangeStreamPartitionsSettings.toBuilder(); + readChangeStreamSettings = settings.readChangeStreamSettings.toBuilder(); } // @@ -892,6 +929,7 @@ public String toString() { .add("checkAndMutateRowSettings", checkAndMutateRowSettings) .add("readModifyWriteRowSettings", readModifyWriteRowSettings) .add("listChangeStreamPartitionsSettings", listChangeStreamPartitionsSettings) + .add("readChangeStreamSettings", readChangeStreamSettings) .add("parent", super.toString()) .toString(); } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamRecordMerger.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamRecordMerger.java new file mode 100644 index 0000000000..0b4bf5acdb --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamRecordMerger.java @@ -0,0 +1,112 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.changestream; + +import com.google.api.core.InternalApi; +import com.google.bigtable.v2.ReadChangeStreamResponse; +import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecordAdapter; +import com.google.cloud.bigtable.gaxx.reframing.Reframer; +import com.google.cloud.bigtable.gaxx.reframing.ReframingResponseObserver; +import com.google.common.base.Preconditions; +import java.util.ArrayDeque; +import java.util.Queue; + +/** + * An implementation of a {@link Reframer} that feeds the change stream record merging {@link + * ChangeStreamStateMachine}. + * + *

{@link ReframingResponseObserver} pushes {@link ReadChangeStreamResponse}s into this class and + * pops a change stream record containing one of the following: 1) Heartbeat. 2) CloseStream. 3) + * ChangeStreamMutation(a representation of a fully merged logical mutation). + * + *

Example usage: + * + *

{@code
+ * ChangeStreamRecordMerger changeStreamRecordMerger =
+ *     new ChangeStreamRecordMerger<>(myChangeStreamRecordAdapter);
+ *
+ * while(responseIterator.hasNext()) {
+ *   ReadChangeStreamResponse response = responseIterator.next();
+ *   changeStreamRecordMerger.push(response);
+ *
+ *   // Drain any change stream records that are now complete.
+ *   while (changeStreamRecordMerger.hasFullFrame()) {
+ *     ChangeStreamRecord changeStreamRecord = changeStreamRecordMerger.pop();
+ *     // Do something with change stream record.
+ *   }
+ * }
+ *
+ * if (changeStreamRecordMerger.hasPartialFrame()) {
+ *   throw new RuntimeException("Incomplete stream");
+ * }
+ *
+ * }
+ * + *

This class is considered an internal implementation detail and not meant to be used by + * applications. + * + *

Package-private for internal use. + * + * @see ReframingResponseObserver for more details + */ +@InternalApi +public class ChangeStreamRecordMerger + implements Reframer { + private final ChangeStreamStateMachine changeStreamStateMachine; + private final Queue changeStreamRecord; + + public ChangeStreamRecordMerger( + ChangeStreamRecordAdapter.ChangeStreamRecordBuilder + changeStreamRecordBuilder) { + changeStreamStateMachine = new ChangeStreamStateMachine<>(changeStreamRecordBuilder); + changeStreamRecord = new ArrayDeque<>(); + } + + @Override + public void push(ReadChangeStreamResponse response) { + if (response.hasHeartbeat()) { + changeStreamStateMachine.handleHeartbeat(response.getHeartbeat()); + } else if (response.hasCloseStream()) { + changeStreamStateMachine.handleCloseStream(response.getCloseStream()); + } else { + changeStreamStateMachine.handleDataChange(response.getDataChange()); + } + if (changeStreamStateMachine.hasCompleteChangeStreamRecord()) { + changeStreamRecord.add(changeStreamStateMachine.consumeChangeStreamRecord()); + } + } + + @Override + public boolean hasFullFrame() { + return !changeStreamRecord.isEmpty(); + } + + @Override + public boolean hasPartialFrame() { + // Check if buffer in this class contains data. If an assembled is still not available, then + // that means `buffer` has been fully consumed. The last place to check is the + // ChangeStreamStateMachine buffer, to see if it's holding on to an incomplete change + // stream record. 
+ return hasFullFrame() || changeStreamStateMachine.isChangeStreamRecordInProgress(); + } + + @Override + public ChangeStreamRecordT pop() { + return Preconditions.checkNotNull( + changeStreamRecord.poll(), + "ChangeStreamRecordMerger.pop() called when there are no change stream records."); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamRecordMergingCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamRecordMergingCallable.java new file mode 100644 index 0000000000..5c6c07451b --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamRecordMergingCallable.java @@ -0,0 +1,63 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.stub.changestream; + +import com.google.api.core.InternalApi; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ResponseObserver; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.bigtable.v2.ReadChangeStreamRequest; +import com.google.bigtable.v2.ReadChangeStreamResponse; +import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecordAdapter; +import com.google.cloud.bigtable.gaxx.reframing.ReframingResponseObserver; + +/** + * A ServerStreamingCallable that consumes {@link ReadChangeStreamResponse}s and produces change + * stream records. + * + *

This class delegates all the work to gax's {@link ReframingResponseObserver} and the logic to + * {@link ChangeStreamRecordMerger}. + * + *

This class is considered an internal implementation detail and not meant to be used by + * applications. + */ +@InternalApi +public class ChangeStreamRecordMergingCallable + extends ServerStreamingCallable { + private final ServerStreamingCallable inner; + private final ChangeStreamRecordAdapter changeStreamRecordAdapter; + + public ChangeStreamRecordMergingCallable( + ServerStreamingCallable inner, + ChangeStreamRecordAdapter changeStreamRecordAdapter) { + this.inner = inner; + this.changeStreamRecordAdapter = changeStreamRecordAdapter; + } + + @Override + public void call( + ReadChangeStreamRequest request, + ResponseObserver responseObserver, + ApiCallContext context) { + ChangeStreamRecordAdapter.ChangeStreamRecordBuilder + changeStreamRecordBuilder = changeStreamRecordAdapter.createChangeStreamRecordBuilder(); + ChangeStreamRecordMerger merger = + new ChangeStreamRecordMerger<>(changeStreamRecordBuilder); + ReframingResponseObserver innerObserver = + new ReframingResponseObserver<>(responseObserver, merger); + inner.call(request, innerObserver, context); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamStateMachine.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamStateMachine.java index 7ab7fa2b7b..ea555ea445 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamStateMachine.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamStateMachine.java @@ -98,6 +98,13 @@ final class ChangeStreamStateMachine { private int numDataChanges = 0; private int numNonCellMods = 0; private int numCellChunks = 0; // 1 for non-chunked cell. + /** + * Expected total size of a chunked SetCell value, given by the {@link + * ReadChangeStreamResponse.MutationChunk.ChunkInfo}. This value should be the same for all chunks + * of a SetCell. 
+ */ + private int expectedTotalSizeOfChunkedSetCell = 0; + private int actualTotalSizeOfChunkedSetCell = 0; private ChangeStreamRecordT completeChangeStreamRecord; @@ -219,6 +226,7 @@ private void reset() { numDataChanges = 0; numNonCellMods = 0; numCellChunks = 0; + expectedTotalSizeOfChunkedSetCell = 0; actualTotalSizeOfChunkedSetCell = 0; completeChangeStreamRecord = null; @@ -383,7 +391,11 @@ State handleMod(ReadChangeStreamResponse.DataChange dataChange, int index) { // If it has chunk info, it must be the first chunk of a chunked SetCell. validate( chunk.getChunkInfo().getChunkedValueOffset() == 0, - "First chunk of a chunked cell must start with offset==0."); + "AWAITING_NEW_MOD: First chunk of a chunked cell must start with offset==0."); + validate( + chunk.getChunkInfo().getChunkedValueSize() > 0, + "AWAITING_NEW_MOD: First chunk of a chunked cell must have a positive chunked value size."); + expectedTotalSizeOfChunkedSetCell = chunk.getChunkInfo().getChunkedValueSize(); actualTotalSizeOfChunkedSetCell = 0; } builder.startCell( @@ -459,15 +471,18 @@ State handleMod(ReadChangeStreamResponse.DataChange dataChange, int index) { validate( chunk.getChunkInfo().getChunkedValueSize() > 0, "AWAITING_CELL_VALUE: Chunked value size must be positive."); + validate( + chunk.getChunkInfo().getChunkedValueSize() == expectedTotalSizeOfChunkedSetCell, + "AWAITING_CELL_VALUE: Chunked value size must be the same for all chunks."); actualTotalSizeOfChunkedSetCell += setCell.getValue().size(); // If it's the last chunk of the chunked SetCell, finish the cell. if (chunk.getChunkInfo().getLastChunk()) { builder.finishCell(); validate( - actualTotalSizeOfChunkedSetCell == chunk.getChunkInfo().getChunkedValueSize(), + actualTotalSizeOfChunkedSetCell == expectedTotalSizeOfChunkedSetCell, "Chunked value size in ChunkInfo doesn't match the actual total size. 
" - + "ChunkInfo: " - + chunk.getChunkInfo().getChunkedValueSize() + + "Expected total size: " + + expectedTotalSizeOfChunkedSetCell + "; actual total size: " + actualTotalSizeOfChunkedSetCell); return checkAndFinishMutationIfNeeded(dataChange, index + 1); @@ -569,6 +584,8 @@ private void validate(boolean condition, String message) { + numNonCellMods + ", numCellChunks: " + numCellChunks + + ", expectedTotalSizeOfChunkedSetCell: " + + expectedTotalSizeOfChunkedSetCell + ", actualTotalSizeOfChunkedSetCell: " + actualTotalSizeOfChunkedSetCell); } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamUserCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamUserCallable.java new file mode 100644 index 0000000000..0c78199ccd --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamUserCallable.java @@ -0,0 +1,51 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.stub.changestream; + +import com.google.api.core.InternalApi; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ResponseObserver; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.bigtable.v2.ReadChangeStreamRequest; +import com.google.cloud.bigtable.data.v2.internal.RequestContext; +import com.google.cloud.bigtable.data.v2.models.ReadChangeStreamQuery; + +/** + * A ServerStreamingCallable that converts a {@link ReadChangeStreamQuery} to a {@link + * ReadChangeStreamRequest}. + */ +@InternalApi("Used in Changestream beam pipeline.") +public class ReadChangeStreamUserCallable + extends ServerStreamingCallable { + private final ServerStreamingCallable inner; + private final RequestContext requestContext; + + public ReadChangeStreamUserCallable( + ServerStreamingCallable inner, + RequestContext requestContext) { + this.inner = inner; + this.requestContext = requestContext; + } + + @Override + public void call( + ReadChangeStreamQuery request, + ResponseObserver responseObserver, + ApiCallContext context) { + ReadChangeStreamRequest innerRequest = request.toProto(requestContext); + inner.call(innerRequest, responseObserver, context); + } +} diff --git a/google-cloud-bigtable/src/main/proto/changestream_tests.proto b/google-cloud-bigtable/src/main/proto/changestream_tests.proto new file mode 100644 index 0000000000..779cdfba35 --- /dev/null +++ b/google-cloud-bigtable/src/main/proto/changestream_tests.proto @@ -0,0 +1,63 @@ +// Copyright 2022, Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.conformance.bigtable.v2; + +import "google/bigtable/v2/bigtable.proto"; +import "google/protobuf/timestamp.proto"; +import "google/bigtable/v2/data.proto"; + +option csharp_namespace = "Google.Cloud.Bigtable.V2.Tests.Conformance"; +option java_outer_classname = "ChangeStreamTestDefinition"; +option java_package = "com.google.cloud.conformance.bigtable.v2"; +option go_package = "google/cloud/conformance/bigtable/v2"; + +message ChangeStreamTestFile { + repeated ReadChangeStreamTest read_change_stream_tests = 1; +} + +message ReadChangeStreamTest { + + message TestChangeStreamMutation { + bytes row_key = 1; + google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type type = 2; + string source_cluster_id = 3; + google.protobuf.Timestamp commit_timestamp = 4; + int64 tiebreaker = 5; + string token = 6; + google.protobuf.Timestamp low_watermark = 7; + repeated google.bigtable.v2.Mutation mutations = 8; + } + + message TestChangeStreamRecord { + oneof record { + google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 1; + google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 2; + TestChangeStreamMutation change_stream_mutation = 3; + } + } + + // Expected results of reading the change stream. + // Only the last result can be an error. 
+ message Result { + TestChangeStreamRecord record = 1; + bool error = 2; + } + + string description = 1; + repeated google.bigtable.v2.ReadChangeStreamResponse api_responses = 2; + repeated Result results = 3; +} diff --git a/google-cloud-bigtable/src/main/resources/changestream.json b/google-cloud-bigtable/src/main/resources/changestream.json new file mode 100644 index 0000000000..2dd886f805 --- /dev/null +++ b/google-cloud-bigtable/src/main/resources/changestream.json @@ -0,0 +1,1379 @@ +{ + "readChangeStreamTests": [ + { + "description": "1 heartbeat", + "api_responses": [ + { + "heartbeat": { + "continuation_token": { + "partition": { + "row_range": { + "start_key_closed": "", + "end_key_open": "" + } + }, + "token": "heartbeat-token" + }, + "low_watermark": "2022-07-01T00:00:00Z" + } + } + ], + "results": [ + { + "record" : { + "heartbeat": { + "continuation_token": { + "partition": { + "row_range": { + "start_key_closed": "", + "end_key_open": "" + } + }, + "token": "heartbeat-token" + }, + "low_watermark": "2022-07-01T00:00:00Z" + } + }, + "error": false + } + ] + }, + { + "description": "1 CloseStream", + "api_responses": [ + { + "close_stream": { + "status": { + "code": "11", + "message": "Partition boundaries are misaligned." + }, + "continuation_tokens": [ + { + "partition": { + "row_range": { + "start_key_closed": "", + "end_key_open": "0000000000000001" + } + }, + "token": "close-stream-token-1" + }, + { + "partition": { + "row_range": { + "start_key_closed": "0000000000000001", + "end_key_open": "0000000000000002" + } + }, + "token": "close-stream-token-2" + } + ] + } + } + ], + "results": [ + { + "record" : { + "close_stream": { + "status": { + "code": "11", + "message": "Partition boundaries are misaligned." 
+ }, + "continuation_tokens": [ + { + "partition": { + "row_range": { + "start_key_closed": "", + "end_key_open": "0000000000000001" + } + }, + "token": "close-stream-token-1" + }, + { + "partition": { + "row_range": { + "start_key_closed": "0000000000000001", + "end_key_open": "0000000000000002" + } + }, + "token": "close-stream-token-2" + } + ] + } + }, + "error": false + } + ] + }, + { + "description": "1 heartbeat + 1 CloseStream", + "api_responses": [ + { + "heartbeat": { + "continuation_token": { + "partition": { + "row_range": { + "start_key_closed": "", + "end_key_open": "" + } + }, + "token": "heartbeat-token" + }, + "low_watermark": "2022-07-01T00:00:00Z" + } + }, + { + "close_stream": { + "status": { + "code": "11", + "message": "Partition boundaries are misaligned." + }, + "continuation_tokens": [ + { + "partition": { + "row_range": { + "start_key_closed": "", + "end_key_open": "0000000000000001" + } + }, + "token": "close-stream-token-1" + } + ] + } + } + ], + "results": [ + { + "record" : { + "heartbeat": { + "continuation_token": { + "partition": { + "row_range": { + "start_key_closed": "", + "end_key_open": "" + } + }, + "token": "heartbeat-token" + }, + "low_watermark": "2022-07-01T00:00:00Z" + } + }, + "error": false + }, + { + "record" : { + "close_stream": { + "status": { + "code": "11", + "message": "Partition boundaries are misaligned." 
+ }, + "continuation_tokens": [ + { + "partition": { + "row_range": { + "start_key_closed": "", + "end_key_open": "0000000000000001" + } + }, + "token": "close-stream-token-1" + } + ] + } + }, + "error": false + } + ] + }, + { + "description": "1 logical mutation no chunking([{DF,DC,SC}]->ChangeStreamMutation{DF,DC,SC})", + "api_responses": [ + { + "data_change": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "chunks": [ + { + "mutation": { + "delete_from_family": { + "family_name": "family" + } + } + }, + { + "mutation": { + "delete_from_column" : { + "family_name": "family", + "column_qualifier": "dg==", + "time_range": { + "start_timestamp_micros": 5000, + "end_timestamp_micros": 15000 + } + } + } + }, + { + "mutation": { + "set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + "value": "dg==" + } + } + } + ], + "done": true + } + } + ], + "results": [ + { + "record": { + "change_stream_mutation": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "low_watermark": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "token": "data-change-token", + "mutations": [ + { + "delete_from_family": { + "family_name": "family" + } + }, + { + "delete_from_column" : { + "family_name": "family", + "column_qualifier": "dg==", + "time_range": { + "start_timestamp_micros": 5000, + "end_timestamp_micros": 15000 + } + } + }, + { + "set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + "value": "dg==" + } + } + ] + } + }, + "error": false + } + ] + }, + { + "description": "1 incomplete logical mutation(missing `done: true`)", + "api_responses": [ + { + "data_change": { + "row_key": 
"0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "token": "data-change-token", + "chunks": [ + { + "mutation": { + "delete_from_family": { + "family_name": "family" + } + } + } + ] + } + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "GC mutation no source cluster id", + "api_responses": [ + { + "data_change": { + "row_key": "0000000000000000", + "type": "GARBAGE_COLLECTION", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "chunks": [ + { + "mutation": { + "delete_from_family": { + "family_name": "family" + } + } + } + ], + "done": true + } + } + ], + "results": [ + { + "record": { + "change_stream_mutation": { + "row_key": "0000000000000000", + "type": "GARBAGE_COLLECTION", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "mutations": [ + { + "delete_from_family": { + "family_name": "family" + } + } + ] + } + }, + "error": false + } + ] + }, + { + "description": "1 chunked SetCell([{SC_chunk1(v)}, {SC_chunk2(alue-VAL)}]->ChangeStreamMutation{SC(value-VAL)})", + "api_responses": [ + { + "data_change": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "chunks": [ + { + "chunk_info": { + "chunked_value_size": 9 + }, + "mutation": { + "set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + "value": "dg==" + } + } + } + ] + } + }, + { + "data_change": { + "type": "CONTINUATION", + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "chunks": [ + { + "chunk_info": { + "chunked_value_offset": 1, + "chunked_value_size": 9, + "last_chunk": true + }, + "mutation": { 
+ "set_cell": { + "value": "YWx1ZS1WQUw=" + } + } + } + ], + "done": true + } + } + ], + "results": [ + { + "record": { + "change_stream_mutation": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "mutations": [ + { + "set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + "value": "dmFsdWUtVkFM" + } + } + ] + } + }, + "error": false + } + ] + }, + { + "description": "ChunkedValueSize mismatch for a chunked SetCell([{SC_chunk1(v)}, {SC_chunk2(alue-VAL)}]->error)", + "api_responses": [ + { + "data_change": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "chunks": [ + { + "chunk_info": { + "chunked_value_size": 1 + }, + "mutation": { + "set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + "value": "dg==" + } + } + } + ] + } + }, + { + "data_change": { + "type": "CONTINUATION", + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "chunks": [ + { + "chunk_info": { + "chunked_value_offset": 1, + "chunked_value_size": 9, + "last_chunk": true + }, + "mutation": { + "set_cell": { + "value": "YWx1ZS1WQUw=" + } + } + } + ], + "done": true + } + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "1 chunked SetCell([{SC_chunk1(v)}, {SC_chunk2(alue-VAL)}, {SC_chunk3(-VAL)}]->ChangeStreamMutation{SC(value-VAL-VAL)})", + "api_responses": [ + { + "data_change": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "chunks": [ + { + "chunk_info": { + "chunked_value_size": 13 + }, + "mutation": { + 
"set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + "value": "dg==" + } + } + } + ] + } + }, + { + "data_change": { + "type": "CONTINUATION", + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "chunks": [ + { + "chunk_info": { + "chunked_value_offset": 1, + "chunked_value_size": 13 + }, + "mutation": { + "set_cell": { + "value": "YWx1ZS1WQUw=" + } + } + } + ] + } + }, + { + "data_change": { + "type": "CONTINUATION", + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "chunks": [ + { + "chunk_info": { + "chunked_value_offset": 9, + "chunked_value_size": 13, + "last_chunk": true + }, + "mutation": { + "set_cell": { + "value": "LVZBTA==" + } + } + } + ], + "done": true + } + } + ], + "results": [ + { + "record": { + "change_stream_mutation": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "mutations": [ + { + "set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + "value": "dmFsdWUtVkFMLVZBTA==" + } + } + ] + } + }, + "error": false + } + ] + }, + { + "description": "2 chunked SetCells([{SC1_chunk1(v)}, {SC1_chunk2(alue-VAL), SC2_chunk1(v)}, {SC2_chunk2(alue-VAL)}]->ChangeStreamMutation{SC1(value-VAL),SC2(value-VAL)})", + "api_responses": [ + { + "data_change": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "chunks": [ + { + "chunk_info": { + "chunked_value_size": 9 + }, + "mutation": { + "set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + "value": "dg==" + } + } + } + ] + } + }, + { + "data_change": { + "type": "CONTINUATION", + 
"chunks": [ + { + "chunk_info": { + "chunked_value_offset": 1, + "chunked_value_size": 9, + "last_chunk": true + }, + "mutation": { + "set_cell": { + "value": "YWx1ZS1WQUw=" + } + } + }, + { + "chunk_info": { + "chunked_value_size": 9 + }, + "mutation": { + "set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + "value": "dg==" + } + } + } + ] + } + }, + { + "data_change": { + "type": "CONTINUATION", + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "chunks": [ + { + "chunk_info": { + "chunked_value_offset": 1, + "chunked_value_size": 9, + "last_chunk": true + }, + "mutation": { + "set_cell": { + "value": "YWx1ZS1WQUw=" + } + } + } + ], + "done": true + } + } + ], + "results": [ + { + "record": { + "change_stream_mutation": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "mutations": [ + { + "set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + "value": "dmFsdWUtVkFM" + } + }, + { + "set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + "value": "dmFsdWUtVkFM" + } + } + ] + } + }, + "error": false + } + ] + }, + { + "description": "1 chunked SetCell + 1 unchunked SetCell([{SC1_chunk1(v)}, {SC1_chunk2(alue-VAL), SC2(value-VAL)}]->ChangeStreamMutation{SC1(value-VAL),SC2(value-VAL)})", + "api_responses": [ + { + "data_change": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "chunks": [ + { + "chunk_info": { + "chunked_value_size": 9 + }, + "mutation": { + "set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + 
"value": "dg==" + } + } + } + ] + } + }, + { + "data_change": { + "type": "CONTINUATION", + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "chunks": [ + { + "chunk_info": { + "chunked_value_offset": 1, + "chunked_value_size": 9, + "last_chunk": true + }, + "mutation": { + "set_cell": { + "value": "YWx1ZS1WQUw=" + } + } + }, + { + "mutation": { + "set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + "value": "dmFsdWUtVkFM" + } + } + } + ], + "done": true + } + } + ], + "results": [ + { + "record": { + "change_stream_mutation": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "mutations": [ + { + "set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + "value": "dmFsdWUtVkFM" + } + }, + { + "set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + "value": "dmFsdWUtVkFM" + } + } + ] + } + }, + "error": false + } + ] + }, + { + "description": "1 unchunked SetCell + 1 chunked SetCell([{SC1(v), SC2_chunk1(v)}, {SC2_chunk2(alue-VAL)}]->ChangeStreamMutation{SC1(v),SC2(value-VAL)})", + "api_responses": [ + { + "data_change": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "chunks": [ + { + "mutation": { + "set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + "value": "dg==" + } + } + }, + { + "chunk_info": { + "chunked_value_size": 9 + }, + "mutation": { + "set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + "value": "dg==" + } + } + } + ] + } + }, + { + 
"data_change": { + "type": "CONTINUATION", + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "chunks": [ + { + "chunk_info": { + "chunked_value_offset": 1, + "chunked_value_size": 9, + "last_chunk": true + }, + "mutation": { + "set_cell": { + "value": "YWx1ZS1WQUw=" + } + } + } + ], + "done": true + } + } + ], + "results": [ + { + "record": { + "change_stream_mutation": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "mutations": [ + { + "set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + "value": "dg==" + } + }, + { + "set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + "value": "dmFsdWUtVkFM" + } + } + ] + } + }, + "error": false + } + ] + }, + { + "description": "1 mod + 1 chunked SetCell + 1 mod([{DF1,SC_chunk1(v)}, {SC_chunk2(alue-VAL), DF2}]->ChangeStreamMutation{DF1,SC(value-VAL),DF2})", + "api_responses": [ + { + "data_change": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "chunks": [ + { + "mutation": { + "delete_from_family": { + "family_name": "family" + } + } + }, + { + "chunk_info": { + "chunked_value_size": 9 + }, + "mutation": { + "set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + "value": "dg==" + } + } + } + ] + } + }, + { + "data_change": { + "type": "CONTINUATION", + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "chunks": [ + { + "chunk_info": { + "chunked_value_offset": 1, + "chunked_value_size": 9, + "last_chunk": true + }, + "mutation": { + "set_cell": { + "value": "YWx1ZS1WQUw=" + } + } + }, + { + 
"mutation": { + "delete_from_family": { + "family_name": "family" + } + } + } + ], + "done": true + } + } + ], + "results": [ + { + "record": { + "change_stream_mutation": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "mutations": [ + { + "delete_from_family": { + "family_name": "family" + } + }, + { + "set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + "value": "dmFsdWUtVkFM" + } + }, + { + "delete_from_family": { + "family_name": "family" + } + } + ] + } + }, + "error": false + } + ] + }, + { + "description": "1 chunked SetCell + many nonchunked mods([{SC_chunk1(v)}, {SC_chunk2(alue-VAL),DF,DC}]->ChangeStreamMutation{SC(value-VAL),DF,DC})", + "api_responses": [ + { + "data_change": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "chunks": [ + { + "chunk_info": { + "chunked_value_size": 9 + }, + "mutation": { + "set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + "value": "dg==" + } + } + } + ] + } + }, + { + "data_change": { + "type": "CONTINUATION", + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "chunks": [ + { + "chunk_info": { + "chunked_value_offset": 1, + "chunked_value_size": 9, + "last_chunk": true + }, + "mutation": { + "set_cell": { + "value": "YWx1ZS1WQUw=" + } + } + }, + { + "mutation": { + "delete_from_column" : { + "family_name": "family", + "column_qualifier": "dg==", + "time_range": { + "start_timestamp_micros": 5000, + "end_timestamp_micros": 15000 + } + } + } + }, + { + "mutation": { + "delete_from_family": { + "family_name": "family" + } + } + } + ], + "done": true + } + } + ], + "results": 
[ + { + "record": { + "change_stream_mutation": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "mutations": [ + { + "set_cell": { + "family_name": "family", + "column_qualifier": "0000000000000000", + "timestamp_micros": 1000, + "value": "dmFsdWUtVkFM" + } + }, + { + "delete_from_column" : { + "family_name": "family", + "column_qualifier": "dg==", + "time_range": { + "start_timestamp_micros": 5000, + "end_timestamp_micros": 15000 + } + } + }, + { + "delete_from_family": { + "family_name": "family" + } + } + ] + } + }, + "error": false + } + ] + }, + { + "description": "non SetCell chunking([{DF1},{DF2,DC}]->ChangeStreamMutation{DF1,DF2,DC})", + "api_responses": [ + { + "data_change": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "chunks": [ + { + "mutation": { + "delete_from_family": { + "family_name": "family" + } + } + } + ] + } + }, + { + "data_change": { + "type": "CONTINUATION", + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "chunks": [ + { + "mutation": { + "delete_from_family": { + "family_name": "family" + } + } + }, + { + "mutation": { + "delete_from_column" : { + "family_name": "family", + "column_qualifier": "dg==", + "time_range": { + "start_timestamp_micros": 5000, + "end_timestamp_micros": 15000 + } + } + } + } + ], + "done": true + } + } + ], + "results": [ + { + "record": { + "change_stream_mutation": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "mutations": [ + { + "delete_from_family": { + "family_name": "family" + } 
+ }, + { + "delete_from_family": { + "family_name": "family" + } + }, + { + "delete_from_column" : { + "family_name": "family", + "column_qualifier": "dg==", + "time_range": { + "start_timestamp_micros": 5000, + "end_timestamp_micros": 15000 + } + } + } + ] + } + }, + "error": false + } + ] + }, + { + "description": "2 logical mutations with non SetCell chunking + CloseStream([{Change1_DF1}, {Change1_DF2}, {Change2_DF3}, {Change2_DF4}, {CloseStream}]->[ChangeStreamMutation1{DF1,DF2}),ChangeStreamMutation2{DF3,DF4}),CloseStream]", + "api_responses": [ + { + "data_change": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "chunks": [ + { + "mutation": { + "delete_from_family": { + "family_name": "family" + } + } + } + ] + } + }, + { + "data_change": { + "type": "CONTINUATION", + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "chunks": [ + { + "mutation": { + "delete_from_family": { + "family_name": "family" + } + } + } + ], + "done": true + } + }, + { + "data_change": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "chunks": [ + { + "mutation": { + "delete_from_family": { + "family_name": "family" + } + } + } + ] + } + }, + { + "data_change": { + "type": "CONTINUATION", + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "chunks": [ + { + "mutation": { + "delete_from_family": { + "family_name": "family" + } + } + } + ], + "done": true + } + }, + { + "close_stream": { + "status": { + "code": "11", + "message": "Partition boundaries are misaligned." 
+ }, + "continuation_tokens": [ + { + "partition": { + "row_range": { + "start_key_closed": "", + "end_key_open": "0000000000000001" + } + }, + "token": "close-stream-token-1" + }, + { + "partition": { + "row_range": { + "start_key_closed": "0000000000000001", + "end_key_open": "0000000000000002" + } + }, + "token": "close-stream-token-2" + } + ] + } + } + ], + "results": [ + { + "record": { + "change_stream_mutation": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "mutations": [ + { + "delete_from_family": { + "family_name": "family" + } + }, + { + "delete_from_family": { + "family_name": "family" + } + } + ] + } + }, + "error": false + }, + { + "record": { + "change_stream_mutation": { + "row_key": "0000000000000000", + "type": "USER", + "source_cluster_id": "source-cluster-id", + "commit_timestamp": "2022-07-01T00:00:00Z", + "tiebreaker": 100, + "token": "data-change-token", + "low_watermark": "2022-07-01T00:00:00Z", + "mutations": [ + { + "delete_from_family": { + "family_name": "family" + } + }, + { + "delete_from_family": { + "family_name": "family" + } + } + ] + } + }, + "error": false + }, + { + "record" : { + "close_stream": { + "status": { + "code": "11", + "message": "Partition boundaries are misaligned." 
+ }, + "continuation_tokens": [ + { + "partition": { + "row_range": { + "start_key_closed": "", + "end_key_open": "0000000000000001" + } + }, + "token": "close-stream-token-1" + }, + { + "partition": { + "row_range": { + "start_key_closed": "0000000000000001", + "end_key_open": "0000000000000002" + } + }, + "token": "close-stream-token-2" + } + ] + } + }, + "error": false + } + ] + } + ] +} \ No newline at end of file diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientTests.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientTests.java index fcbcc15e30..648a298155 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientTests.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientTests.java @@ -26,11 +26,13 @@ import com.google.api.gax.rpc.UnaryCallable; import com.google.bigtable.v2.RowRange; import com.google.cloud.bigtable.data.v2.models.BulkMutation; +import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecord; import com.google.cloud.bigtable.data.v2.models.ConditionalRowMutation; import com.google.cloud.bigtable.data.v2.models.Filters.Filter; import com.google.cloud.bigtable.data.v2.models.KeyOffset; import com.google.cloud.bigtable.data.v2.models.Mutation; import com.google.cloud.bigtable.data.v2.models.Query; +import com.google.cloud.bigtable.data.v2.models.ReadChangeStreamQuery; import com.google.cloud.bigtable.data.v2.models.ReadModifyWriteRow; import com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.models.RowCell; @@ -83,6 +85,10 @@ public class BigtableDataClientTests { @Mock(answer = Answers.RETURNS_DEEP_STUBS) private ServerStreamingCallable mockListChangeStreamPartitionsCallable; + @Mock(answer = Answers.RETURNS_DEEP_STUBS) + private ServerStreamingCallable + mockReadChangeStreamCallable; + private BigtableDataClient 
bigtableDataClient; @Before @@ -165,6 +171,13 @@ public void proxyListChangeStreamPartitionsCallableTest() { .isSameInstanceAs(mockListChangeStreamPartitionsCallable); } + @Test + public void proxyReadChangeStreamCallableTest() { + Mockito.when(mockStub.readChangeStreamCallable()).thenReturn(mockReadChangeStreamCallable); + assertThat(bigtableDataClient.readChangeStreamCallable()) + .isSameInstanceAs(mockReadChangeStreamCallable); + } + @Test public void proxyReadRowAsyncTest() { Mockito.when(mockStub.readRowCallable()).thenReturn(mockReadRowCallable); @@ -334,6 +347,28 @@ public void proxyListChangeStreamPartitionsAsyncTest() { Mockito.verify(mockListChangeStreamPartitionsCallable).call("fake-table", mockObserver); } + @Test + public void proxyReadChangeStreamSyncTest() { + Mockito.when(mockStub.readChangeStreamCallable()).thenReturn(mockReadChangeStreamCallable); + + ReadChangeStreamQuery query = ReadChangeStreamQuery.create("fake-table"); + bigtableDataClient.readChangeStream(query); + + Mockito.verify(mockReadChangeStreamCallable).call(query); + } + + @Test + public void proxyReadChangeStreamAsyncTest() { + Mockito.when(mockStub.readChangeStreamCallable()).thenReturn(mockReadChangeStreamCallable); + + @SuppressWarnings("unchecked") + ResponseObserver mockObserver = Mockito.mock(ResponseObserver.class); + ReadChangeStreamQuery query = ReadChangeStreamQuery.create("fake-table"); + bigtableDataClient.readChangeStreamAsync(query, mockObserver); + + Mockito.verify(mockReadChangeStreamCallable).call(query, mockObserver); + } + @Test public void proxySampleRowKeysCallableTest() { Mockito.when(mockStub.sampleRowKeysCallable()).thenReturn(mockSampleRowKeysCallable); diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java index 32ab93d1f2..fa2efbf7e0 100644 --- 
a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java @@ -702,6 +702,7 @@ public void isRefreshingChannelFalseValueTest() { "checkAndMutateRowSettings", "readModifyWriteRowSettings", "listChangeStreamPartitionsSettings", + "readChangeStreamSettings", }; @Test diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamRecordMergingCallableTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamRecordMergingCallableTest.java new file mode 100644 index 0000000000..d23eb64765 --- /dev/null +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamRecordMergingCallableTest.java @@ -0,0 +1,124 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.stub.changestream; + +import com.google.bigtable.v2.ReadChangeStreamRequest; +import com.google.bigtable.v2.ReadChangeStreamResponse; +import com.google.bigtable.v2.RowRange; +import com.google.bigtable.v2.StreamContinuationToken; +import com.google.bigtable.v2.StreamPartition; +import com.google.cloud.bigtable.data.v2.models.ChangeStreamContinuationToken; +import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecord; +import com.google.cloud.bigtable.data.v2.models.CloseStream; +import com.google.cloud.bigtable.data.v2.models.DefaultChangeStreamRecordAdapter; +import com.google.cloud.bigtable.data.v2.models.Heartbeat; +import com.google.cloud.bigtable.gaxx.testing.FakeStreamingApi; +import com.google.cloud.bigtable.gaxx.testing.FakeStreamingApi.ServerStreamingStashCallable; +import com.google.protobuf.ByteString; +import com.google.protobuf.Timestamp; +import com.google.rpc.Status; +import java.util.Collections; +import java.util.List; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * Additional tests in addition to {@link ReadChangeStreamMergingAcceptanceTest}. + * + *

All the ChangeStreamMutation tests are in {@link ReadChangeStreamMergingAcceptanceTest}. + */ +@RunWith(JUnit4.class) +public class ChangeStreamRecordMergingCallableTest { + + @Test + public void heartbeatTest() { + ReadChangeStreamResponse.Heartbeat heartbeatProto = + ReadChangeStreamResponse.Heartbeat.newBuilder() + .setLowWatermark(Timestamp.newBuilder().setSeconds(1000).build()) + .setContinuationToken( + StreamContinuationToken.newBuilder().setToken("random-token").build()) + .build(); + ReadChangeStreamResponse response = + ReadChangeStreamResponse.newBuilder().setHeartbeat(heartbeatProto).build(); + FakeStreamingApi.ServerStreamingStashCallable + inner = new ServerStreamingStashCallable<>(Collections.singletonList(response)); + + ChangeStreamRecordMergingCallable mergingCallable = + new ChangeStreamRecordMergingCallable<>(inner, new DefaultChangeStreamRecordAdapter()); + List results = + mergingCallable.all().call(ReadChangeStreamRequest.getDefaultInstance()); + + // Validate the result. 
+ Assert.assertEquals(results.size(), 1); + ChangeStreamRecord record = results.get(0); + Assert.assertTrue(record instanceof Heartbeat); + Heartbeat heartbeat = (Heartbeat) record; + Assert.assertEquals( + heartbeat.getChangeStreamContinuationToken().getRowRange(), + heartbeatProto.getContinuationToken().getPartition().getRowRange()); + Assert.assertEquals( + heartbeat.getChangeStreamContinuationToken().getToken(), + heartbeatProto.getContinuationToken().getToken()); + Assert.assertEquals(heartbeat.getLowWatermark(), heartbeatProto.getLowWatermark()); + } + + @Test + public void closeStreamTest() { + StreamContinuationToken streamContinuationToken = + StreamContinuationToken.newBuilder() + .setPartition( + StreamPartition.newBuilder() + .setRowRange( + RowRange.newBuilder() + .setStartKeyClosed(ByteString.copyFromUtf8("")) + .setEndKeyOpen(ByteString.copyFromUtf8("")) + .build()) + .build()) + .setToken("random-token") + .build(); + ReadChangeStreamResponse.CloseStream closeStreamProto = + ReadChangeStreamResponse.CloseStream.newBuilder() + .addContinuationTokens(streamContinuationToken) + .setStatus(Status.newBuilder().setCode(0).build()) + .build(); + ReadChangeStreamResponse response = + ReadChangeStreamResponse.newBuilder().setCloseStream(closeStreamProto).build(); + FakeStreamingApi.ServerStreamingStashCallable + inner = new ServerStreamingStashCallable<>(Collections.singletonList(response)); + + ChangeStreamRecordMergingCallable mergingCallable = + new ChangeStreamRecordMergingCallable<>(inner, new DefaultChangeStreamRecordAdapter()); + List results = + mergingCallable.all().call(ReadChangeStreamRequest.getDefaultInstance()); + + // Validate the result. 
+ Assert.assertEquals(results.size(), 1); + ChangeStreamRecord record = results.get(0); + Assert.assertTrue(record instanceof CloseStream); + CloseStream closeStream = (CloseStream) record; + Assert.assertEquals(closeStream.getStatus(), closeStreamProto.getStatus()); + Assert.assertEquals(closeStream.getChangeStreamContinuationTokens().size(), 1); + ChangeStreamContinuationToken changeStreamContinuationToken = + closeStream.getChangeStreamContinuationTokens().get(0); + Assert.assertEquals( + changeStreamContinuationToken.getRowRange(), + streamContinuationToken.getPartition().getRowRange()); + Assert.assertEquals( + changeStreamContinuationToken.getToken(), streamContinuationToken.getToken()); + } +} diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamMergingAcceptanceTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamMergingAcceptanceTest.java new file mode 100644 index 0000000000..5ae88a7f9f --- /dev/null +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamMergingAcceptanceTest.java @@ -0,0 +1,263 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.stub.changestream; + +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; + +import com.google.api.client.util.Lists; +import com.google.api.gax.rpc.ServerStream; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.bigtable.v2.Mutation; +import com.google.bigtable.v2.ReadChangeStreamRequest; +import com.google.bigtable.v2.ReadChangeStreamResponse; +import com.google.bigtable.v2.StreamContinuationToken; +import com.google.bigtable.v2.StreamPartition; +import com.google.bigtable.v2.TimestampRange; +import com.google.cloud.bigtable.data.v2.models.ChangeStreamContinuationToken; +import com.google.cloud.bigtable.data.v2.models.ChangeStreamMutation; +import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecord; +import com.google.cloud.bigtable.data.v2.models.CloseStream; +import com.google.cloud.bigtable.data.v2.models.DefaultChangeStreamRecordAdapter; +import com.google.cloud.bigtable.data.v2.models.DeleteCells; +import com.google.cloud.bigtable.data.v2.models.DeleteFamily; +import com.google.cloud.bigtable.data.v2.models.Entry; +import com.google.cloud.bigtable.data.v2.models.Heartbeat; +import com.google.cloud.bigtable.data.v2.models.SetCell; +import com.google.cloud.bigtable.gaxx.testing.FakeStreamingApi; +import com.google.cloud.conformance.bigtable.v2.ChangeStreamTestDefinition.ChangeStreamTestFile; +import com.google.cloud.conformance.bigtable.v2.ChangeStreamTestDefinition.ReadChangeStreamTest; +import com.google.common.base.CaseFormat; +import com.google.protobuf.util.JsonFormat; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +/** + * Parses and runs the acceptance tests for read change stream. 
Currently, this test is only used by + * the JAVA library. If in the future we need cross-language support, we should move the test proto + * to https://github.com/googleapis/conformance-tests/tree/main/bigtable/v2/proto/google/cloud/conformance/bigtable/v2 + * and the test data to https://github.com/googleapis/conformance-tests/blob/main/bigtable/v2/readrows.json + */ +@RunWith(Parameterized.class) +public class ReadChangeStreamMergingAcceptanceTest { + // Location: `google-cloud-bigtable/src/main/resources/changestream.json` + private static final String TEST_DATA_JSON_RESOURCE = "changestream.json"; + + private final ReadChangeStreamTest testCase; + + /** + * @param testData The serialized test data representing the test case. + * @param junitName Not used by the test, but used by the parameterized test runner as the name of + * the test. + */ + public ReadChangeStreamMergingAcceptanceTest( + ReadChangeStreamTest testData, @SuppressWarnings("unused") String junitName) { + this.testCase = testData; + } + + // Each tuple consists of [testData: ReadChangeStreamTest, junitName: String] + @Parameterized.Parameters(name = "{1}") + public static Collection data() throws IOException { + ClassLoader cl = Thread.currentThread().getContextClassLoader(); + InputStream dataJson = cl.getResourceAsStream(TEST_DATA_JSON_RESOURCE); + assertWithMessage("Unable to load test definition: %s", TEST_DATA_JSON_RESOURCE) + .that(dataJson) + .isNotNull(); + + InputStreamReader reader = new InputStreamReader(dataJson); + ChangeStreamTestFile.Builder testBuilder = ChangeStreamTestFile.newBuilder(); + JsonFormat.parser().merge(reader, testBuilder); + ChangeStreamTestFile testDefinition = testBuilder.build(); + + List tests = testDefinition.getReadChangeStreamTestsList(); + ArrayList data = new ArrayList<>(tests.size()); + for (ReadChangeStreamTest test : tests) { + String junitName = + CaseFormat.LOWER_HYPHEN.to( + CaseFormat.LOWER_CAMEL, test.getDescription().replace(" ", "-")); + 
data.add(new Object[] {test, junitName}); + } + return data; + } + + @Test + public void test() throws Exception { + List responses = testCase.getApiResponsesList(); + + // Wrap the responses in a callable. + ServerStreamingCallable source = + new FakeStreamingApi.ServerStreamingStashCallable<>(responses); + ChangeStreamRecordMergingCallable mergingCallable = + new ChangeStreamRecordMergingCallable<>(source, new DefaultChangeStreamRecordAdapter()); + + // Invoke the callable to get the change stream records. + ServerStream stream = + mergingCallable.call(ReadChangeStreamRequest.getDefaultInstance()); + + // Transform the change stream records into ReadChangeStreamTest.Result's. + List actualResults = Lists.newArrayList(); + Exception error = null; + try { + for (ChangeStreamRecord record : stream) { + if (record instanceof Heartbeat) { + Heartbeat heartbeat = (Heartbeat) record; + ReadChangeStreamResponse.Heartbeat heartbeatProto = + ReadChangeStreamResponse.Heartbeat.newBuilder() + .setContinuationToken( + StreamContinuationToken.newBuilder() + .setPartition( + StreamPartition.newBuilder() + .setRowRange( + heartbeat.getChangeStreamContinuationToken().getRowRange()) + .build()) + .setToken(heartbeat.getChangeStreamContinuationToken().getToken()) + .build()) + .setLowWatermark(heartbeat.getLowWatermark()) + .build(); + actualResults.add( + ReadChangeStreamTest.Result.newBuilder() + .setRecord( + ReadChangeStreamTest.TestChangeStreamRecord.newBuilder() + .setHeartbeat(heartbeatProto) + .build()) + .build()); + } else if (record instanceof CloseStream) { + CloseStream closeStream = (CloseStream) record; + ReadChangeStreamResponse.CloseStream.Builder builder = + ReadChangeStreamResponse.CloseStream.newBuilder().setStatus(closeStream.getStatus()); + for (ChangeStreamContinuationToken token : + closeStream.getChangeStreamContinuationTokens()) { + builder.addContinuationTokens( + StreamContinuationToken.newBuilder() + .setPartition( + 
StreamPartition.newBuilder().setRowRange(token.getRowRange()).build()) + .setToken(token.getToken()) + .build()); + } + ReadChangeStreamResponse.CloseStream closeStreamProto = builder.build(); + actualResults.add( + ReadChangeStreamTest.Result.newBuilder() + .setRecord( + ReadChangeStreamTest.TestChangeStreamRecord.newBuilder() + .setCloseStream(closeStreamProto) + .build()) + .build()); + } else if (record instanceof ChangeStreamMutation) { + ChangeStreamMutation changeStreamMutation = (ChangeStreamMutation) record; + ReadChangeStreamTest.TestChangeStreamMutation.Builder builder = + ReadChangeStreamTest.TestChangeStreamMutation.newBuilder(); + builder.setRowKey(changeStreamMutation.getRowKey()); + builder.setType(changeStreamMutation.getType()); + if (changeStreamMutation.getSourceClusterId() != null) { + builder.setSourceClusterId(changeStreamMutation.getSourceClusterId()); + } + builder.setCommitTimestamp(changeStreamMutation.getCommitTimestamp()); + builder.setTiebreaker(changeStreamMutation.getTieBreaker()); + builder.setToken(changeStreamMutation.getToken()); + builder.setLowWatermark(changeStreamMutation.getLowWatermark()); + for (Entry entry : changeStreamMutation.getEntries()) { + if (entry instanceof DeleteFamily) { + DeleteFamily deleteFamily = (DeleteFamily) entry; + builder.addMutations( + Mutation.newBuilder() + .setDeleteFromFamily( + Mutation.DeleteFromFamily.newBuilder() + .setFamilyName(deleteFamily.getFamilyName()) + .build())); + } else if (entry instanceof DeleteCells) { + DeleteCells deleteCells = (DeleteCells) entry; + builder.addMutations( + Mutation.newBuilder() + .setDeleteFromColumn( + Mutation.DeleteFromColumn.newBuilder() + .setFamilyName(deleteCells.getFamilyName()) + .setColumnQualifier(deleteCells.getQualifier()) + .setTimeRange( + TimestampRange.newBuilder() + .setStartTimestampMicros( + deleteCells.getTimestampRange().getStart()) + .setEndTimestampMicros( + deleteCells.getTimestampRange().getEnd()) + .build()) + .build())); + } 
else if (entry instanceof SetCell) { + SetCell setCell = (SetCell) entry; + builder.addMutations( + Mutation.newBuilder() + .setSetCell( + Mutation.SetCell.newBuilder() + .setFamilyName(setCell.getFamilyName()) + .setColumnQualifier(setCell.getQualifier()) + .setTimestampMicros(setCell.getTimestamp()) + .setValue(setCell.getValue()))); + } else { + throw new IllegalStateException("Unexpected Entry type"); + } + } + actualResults.add( + ReadChangeStreamTest.Result.newBuilder() + .setRecord( + ReadChangeStreamTest.TestChangeStreamRecord.newBuilder() + .setChangeStreamMutation(builder)) + .build()); + } else { + throw new IllegalStateException("Unexpected ChangeStreamRecord type"); + } + } + } catch (Exception e) { + error = e; + } + + // Verify the results. + if (expectsError(testCase)) { + assertThat(error).isNotNull(); + } else { + if (error != null) { + throw error; + } + } + + assertThat(getNonExceptionResults(testCase)).isEqualTo(actualResults); + } + + private static boolean expectsError(ReadChangeStreamTest testCase) { + List results = testCase.getResultsList(); + return results != null && !results.isEmpty() && results.get(results.size() - 1).getError(); + } + + private static List getNonExceptionResults( + ReadChangeStreamTest testCase) { + List results = testCase.getResultsList(); + List response = new ArrayList<>(); + if (results != null) { + for (ReadChangeStreamTest.Result result : results) { + if (!result.getError()) { + response.add(result); + } + } + } + return response; + } +} diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamUserCallableTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamUserCallableTest.java new file mode 100644 index 0000000000..4043ea78cd --- /dev/null +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamUserCallableTest.java @@ -0,0 +1,48 @@ +/* 
+ * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.changestream; + +import com.google.bigtable.v2.*; +import com.google.cloud.bigtable.data.v2.internal.RequestContext; +import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecord; +import com.google.cloud.bigtable.data.v2.models.ReadChangeStreamQuery; +import com.google.cloud.bigtable.gaxx.testing.FakeStreamingApi.ServerStreamingStashCallable; +import com.google.common.truth.Truth; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ReadChangeStreamUserCallableTest { + private static final RequestContext REQUEST_CONTEXT = + RequestContext.create("fake-project", "fake-instance", "fake-profile"); + + @Test + public void testRequestIsConverted() { + ServerStreamingStashCallable innerCallable = + new ServerStreamingStashCallable<>(); + ReadChangeStreamUserCallable callable = + new ReadChangeStreamUserCallable<>(innerCallable, REQUEST_CONTEXT); + ReadChangeStreamQuery query = + ReadChangeStreamQuery.create("fake-table") + .streamPartition("begin", "end") + .startTime(com.google.protobuf.Timestamp.newBuilder().setSeconds(1000).build()) + .endTime(com.google.protobuf.Timestamp.newBuilder().setSeconds(2000).build()) + .heartbeatDuration(5L); + callable.call(query); + 
Truth.assertThat(innerCallable.getActualRequest()).isEqualTo(query.toProto(REQUEST_CONTEXT)); + } +} From 9b30758a32bc682a4b15c6b374ca9532bfb65e6e Mon Sep 17 00:00:00 2001 From: tengzhonger <109308630+tengzhonger@users.noreply.github.com> Date: Tue, 9 Aug 2022 17:15:06 -0400 Subject: [PATCH 07/13] feat: Expose some package-private methods to be used by CDC beam code (#1345) Co-authored-by: Teng Zhong --- .../models/ChangeStreamContinuationToken.java | 7 ++++-- .../data/v2/models/ChangeStreamMutation.java | 25 +++++++++++++------ .../bigtable/data/v2/models/CloseStream.java | 3 ++- .../bigtable/data/v2/models/Heartbeat.java | 3 ++- 4 files changed, 26 insertions(+), 12 deletions(-) diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationToken.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationToken.java index f499a94e45..bb5363a3a5 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationToken.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationToken.java @@ -15,6 +15,7 @@ */ package com.google.cloud.bigtable.data.v2.models; +import com.google.api.core.InternalApi; import com.google.bigtable.v2.RowRange; import com.google.bigtable.v2.StreamContinuationToken; import com.google.common.base.MoreObjects; @@ -57,12 +58,14 @@ public String getToken() { * Creates the protobuf. This method is considered an internal implementation detail and not meant * to be used by applications. */ - StreamContinuationToken toProto() { + @InternalApi("Used in Changestream beam pipeline.") + public StreamContinuationToken toProto() { return builder.build(); } /** Wraps the protobuf {@link StreamContinuationToken}. 
*/ - static ChangeStreamContinuationToken fromProto( + @InternalApi("Used in Changestream beam pipeline.") + public static ChangeStreamContinuationToken fromProto( @Nonnull StreamContinuationToken streamContinuationToken) { return new ChangeStreamContinuationToken(streamContinuationToken.toBuilder()); } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java index db0891fda1..f9107220b3 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java @@ -15,6 +15,7 @@ */ package com.google.cloud.bigtable.data.v2.models; +import com.google.api.core.InternalApi; import com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type; import com.google.cloud.bigtable.data.v2.models.Range.TimestampRange; import com.google.cloud.bigtable.data.v2.stub.changestream.ChangeStreamRecordMerger; @@ -99,7 +100,8 @@ private ChangeStreamMutation(Builder builder) { * ChangeStreamMutation because `token` and `loWatermark` must be set later when we finish * building the logical mutation. */ - static Builder createUserMutation( + @InternalApi("Used in Changestream beam pipeline.") + public static Builder createUserMutation( @Nonnull ByteString rowKey, @Nonnull String sourceClusterId, @Nonnull Timestamp commitTimestamp, @@ -112,7 +114,8 @@ static Builder createUserMutation( * because `token` and `loWatermark` must be set later when we finish building the logical * mutation. 
*/ - static Builder createGcMutation( + @InternalApi("Used in Changestream beam pipeline.") + public static Builder createGcMutation( @Nonnull ByteString rowKey, @Nonnull Timestamp commitTimestamp, int tieBreaker) { return new Builder(rowKey, Type.GARBAGE_COLLECTION, null, commitTimestamp, tieBreaker); } @@ -224,7 +227,8 @@ private Builder(ChangeStreamMutation changeStreamMutation) { this.lowWatermark = changeStreamMutation.lowWatermark; } - Builder setCell( + @InternalApi("Used in Changestream beam pipeline.") + public Builder setCell( @Nonnull String familyName, @Nonnull ByteString qualifier, long timestamp, @@ -233,7 +237,8 @@ Builder setCell( return this; } - Builder deleteCells( + @InternalApi("Used in Changestream beam pipeline.") + public Builder deleteCells( @Nonnull String familyName, @Nonnull ByteString qualifier, @Nonnull TimestampRange timestampRange) { @@ -241,22 +246,26 @@ Builder deleteCells( return this; } - Builder deleteFamily(@Nonnull String familyName) { + @InternalApi("Used in Changestream beam pipeline.") + public Builder deleteFamily(@Nonnull String familyName) { this.entries.add(DeleteFamily.create(familyName)); return this; } - Builder setToken(@Nonnull String token) { + @InternalApi("Used in Changestream beam pipeline.") + public Builder setToken(@Nonnull String token) { this.token = token; return this; } - Builder setLowWatermark(@Nonnull Timestamp lowWatermark) { + @InternalApi("Used in Changestream beam pipeline.") + public Builder setLowWatermark(@Nonnull Timestamp lowWatermark) { this.lowWatermark = lowWatermark; return this; } - ChangeStreamMutation build() { + @InternalApi("Used in Changestream beam pipeline.") + public ChangeStreamMutation build() { Preconditions.checkArgument( token != null && lowWatermark != null, "ChangeStreamMutation must have a continuation token and low watermark."); diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/CloseStream.java 
b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/CloseStream.java index 403705f676..e871c86697 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/CloseStream.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/CloseStream.java @@ -69,7 +69,8 @@ private void writeObject(ObjectOutputStream output) throws IOException { } /** Wraps the protobuf {@link ReadChangeStreamResponse.CloseStream}. */ - static CloseStream fromProto(@Nonnull ReadChangeStreamResponse.CloseStream closeStream) { + @InternalApi("Used in Changestream beam pipeline.") + public static CloseStream fromProto(@Nonnull ReadChangeStreamResponse.CloseStream closeStream) { return new CloseStream(closeStream.getStatus(), closeStream.getContinuationTokensList()); } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Heartbeat.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Heartbeat.java index f2371c8507..63c23525f3 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Heartbeat.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Heartbeat.java @@ -32,7 +32,8 @@ private static Heartbeat create( } /** Wraps the protobuf {@link ReadChangeStreamResponse.Heartbeat}. 
*/ - static Heartbeat fromProto(@Nonnull ReadChangeStreamResponse.Heartbeat heartbeat) { + @InternalApi("Used in Changestream beam pipeline.") + public static Heartbeat fromProto(@Nonnull ReadChangeStreamResponse.Heartbeat heartbeat) { return create( ChangeStreamContinuationToken.fromProto(heartbeat.getContinuationToken()), heartbeat.getLowWatermark()); From f1176ae2f3af9461770636f0ec53722effe1f8ee Mon Sep 17 00:00:00 2001 From: tengzhonger <109308630+tengzhonger@users.noreply.github.com> Date: Wed, 10 Aug 2022 14:46:38 -0400 Subject: [PATCH 08/13] feat: Implement ReadChangeStreamResumptionStrategy (#1344) * feat: Implement ReadChangeStreamResumptionStrategy * fix: Address comments * fix: Fix typos * fix: Update comments * fix: Address comments Co-authored-by: Teng Zhong --- .../data/v2/stub/EnhancedBigtableStub.java | 8 +- .../ReadChangeStreamResumptionStrategy.java | 101 ++++ .../ReadChangeStreamRetryTest.java | 475 ++++++++++++++++++ 3 files changed, 580 insertions(+), 4 deletions(-) create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamResumptionStrategy.java create mode 100644 google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamRetryTest.java diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java index 7872b1e07e..6b5746fea2 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java @@ -82,6 +82,7 @@ import com.google.cloud.bigtable.data.v2.models.RowMutationEntry; import com.google.cloud.bigtable.data.v2.stub.changestream.ChangeStreamRecordMergingCallable; import 
com.google.cloud.bigtable.data.v2.stub.changestream.ListChangeStreamPartitionsUserCallable; +import com.google.cloud.bigtable.data.v2.stub.changestream.ReadChangeStreamResumptionStrategy; import com.google.cloud.bigtable.data.v2.stub.changestream.ReadChangeStreamUserCallable; import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracerStreamingCallable; import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracerUnaryCallable; @@ -898,7 +899,7 @@ public Map extract( * case of mutations, it will merge the {@link ReadChangeStreamResponse.DataChange}s into * {@link ChangeStreamMutation}. The actual change stream record implementation can be * configured by the {@code changeStreamRecordAdapter} parameter. - *

  • TODO: Retry/resume on failure. + *
  • Retry/resume on failure. *
  • Add tracing & metrics. * */ @@ -939,7 +940,8 @@ public Map extract( // Copy idle timeout settings for watchdog. ServerStreamingCallSettings innerSettings = ServerStreamingCallSettings.newBuilder() - // TODO: setResumptionStrategy. + .setResumptionStrategy( + new ReadChangeStreamResumptionStrategy<>(changeStreamRecordAdapter)) .setRetryableCodes(settings.readChangeStreamSettings().getRetryableCodes()) .setRetrySettings(settings.readChangeStreamSettings().getRetrySettings()) .setIdleTimeout(settings.readChangeStreamSettings().getIdleTimeout()) @@ -951,8 +953,6 @@ public Map extract( ServerStreamingCallable withBigtableTracer = new BigtableTracerStreamingCallable<>(watched); - // TODO: Add ReadChangeStreamRetryCompletedCallable. - ServerStreamingCallable readChangeStreamCallable = Callables.retrying(withBigtableTracer, innerSettings, clientContext); diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamResumptionStrategy.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamResumptionStrategy.java new file mode 100644 index 0000000000..a3532180fc --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamResumptionStrategy.java @@ -0,0 +1,101 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.stub.changestream; + +import com.google.api.core.InternalApi; +import com.google.api.gax.retrying.StreamResumptionStrategy; +import com.google.bigtable.v2.ReadChangeStreamRequest; +import com.google.bigtable.v2.ReadChangeStreamRequest.Builder; +import com.google.bigtable.v2.StreamContinuationToken; +import com.google.bigtable.v2.StreamContinuationTokens; +import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecordAdapter; + +/** + * An implementation of a {@link StreamResumptionStrategy} for change stream records. This class + * tracks the continuation token and upon retry can build a request to resume the stream from where + * it left off. + * + *

    This class is considered an internal implementation detail and not meant to be used by + * applications. + */ +@InternalApi +public class ReadChangeStreamResumptionStrategy + implements StreamResumptionStrategy { + private final ChangeStreamRecordAdapter changeStreamRecordAdapter; + private String token = null; + + public ReadChangeStreamResumptionStrategy( + ChangeStreamRecordAdapter changeStreamRecordAdapter) { + this.changeStreamRecordAdapter = changeStreamRecordAdapter; + } + + @Override + public boolean canResume() { + return true; + } + + @Override + public StreamResumptionStrategy createNew() { + return new ReadChangeStreamResumptionStrategy<>(changeStreamRecordAdapter); + } + + @Override + public ChangeStreamRecordT processResponse(ChangeStreamRecordT response) { + // Update the token from a Heartbeat or a ChangeStreamMutation. + // We don't worry about resumption after CloseStream, since the server + // will return an OK status right after sending a CloseStream. + if (changeStreamRecordAdapter.isHeartbeat(response)) { + this.token = changeStreamRecordAdapter.getTokenFromHeartbeat(response); + } + if (changeStreamRecordAdapter.isChangeStreamMutation(response)) { + this.token = changeStreamRecordAdapter.getTokenFromChangeStreamMutation(response); + } + return response; + } + + /** + * {@inheritDoc} + * + *

    Given a request, this implementation will narrow that request to include data changes that + * come after {@link #token}. + */ + @Override + public ReadChangeStreamRequest getResumeRequest(ReadChangeStreamRequest originalRequest) { + // A null token means that we have not successfully read a Heartbeat nor a ChangeStreamMutation, + // so start from the beginning. + if (this.token == null) { + return originalRequest; + } + + Builder builder = originalRequest.toBuilder(); + // We need to clear the start_from and use the updated continuation_tokens + // to resume the request. + // The partition should always be the same as the one from the original request, + // otherwise we would receive a CloseStream with different + // partitions(which indicates tablet split/merge events). + builder.clearStartFrom(); + builder.setContinuationTokens( + StreamContinuationTokens.newBuilder() + .addTokens( + StreamContinuationToken.newBuilder() + .setPartition(originalRequest.getPartition()) + .setToken(this.token) + .build()) + .build()); + + return builder.build(); + } +} diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamRetryTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamRetryTest.java new file mode 100644 index 0000000000..a0defe6375 --- /dev/null +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamRetryTest.java @@ -0,0 +1,475 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.changestream; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.rpc.ApiException; +import com.google.api.gax.rpc.FixedTransportChannelProvider; +import com.google.api.gax.rpc.InternalException; +import com.google.api.gax.rpc.ServerStream; +import com.google.bigtable.v2.BigtableGrpc; +import com.google.bigtable.v2.Mutation; +import com.google.bigtable.v2.ReadChangeStreamRequest; +import com.google.bigtable.v2.ReadChangeStreamResponse; +import com.google.bigtable.v2.RowRange; +import com.google.bigtable.v2.StreamContinuationToken; +import com.google.bigtable.v2.StreamContinuationTokens; +import com.google.bigtable.v2.StreamPartition; +import com.google.cloud.bigtable.data.v2.BigtableDataClient; +import com.google.cloud.bigtable.data.v2.BigtableDataSettings; +import com.google.cloud.bigtable.data.v2.internal.NameUtil; +import com.google.cloud.bigtable.data.v2.models.ChangeStreamMutation; +import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecord; +import com.google.cloud.bigtable.data.v2.models.CloseStream; +import com.google.cloud.bigtable.data.v2.models.Heartbeat; +import com.google.cloud.bigtable.data.v2.models.ReadChangeStreamQuery; +import com.google.common.collect.Lists; +import com.google.common.collect.Queues; +import com.google.common.truth.Truth; +import com.google.protobuf.ByteString; +import com.google.protobuf.Timestamp; +import 
io.grpc.Status; +import io.grpc.Status.Code; +import io.grpc.StatusRuntimeException; +import io.grpc.stub.StreamObserver; +import io.grpc.testing.GrpcServerRule; +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Queue; +import javax.annotation.Nonnull; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ReadChangeStreamRetryTest { + private static final String PROJECT_ID = "fake-project"; + private static final String INSTANCE_ID = "fake-instance"; + private static final String TABLE_ID = "fake-table"; + private static final String START_KEY_CLOSED = "a"; + private static final String END_KEY_OPEN = "b"; + private static final String HEARTBEAT_TOKEN = "heartbeat-token"; + private static final String CLOSE_STREAM_TOKEN = "close-stream-token"; + private static final String DATA_CHANGE_TOKEN = "data-change-token"; + private static Timestamp REQUEST_START_TIME = Timestamp.newBuilder().setSeconds(1).build(); + + @Rule public GrpcServerRule serverRule = new GrpcServerRule(); + private TestBigtableService service; + private BigtableDataClient client; + + @Before + public void setUp() throws IOException { + service = new TestBigtableService(); + serverRule.getServiceRegistry().addService(service); + + BigtableDataSettings.Builder settings = + BigtableDataSettings.newBuilder() + .setProjectId(PROJECT_ID) + .setInstanceId(INSTANCE_ID) + .setCredentialsProvider(NoCredentialsProvider.create()); + + settings + .stubSettings() + .setTransportChannelProvider( + FixedTransportChannelProvider.create( + GrpcTransportChannel.create(serverRule.getChannel()))) + .build(); + + client = BigtableDataClient.create(settings.build()); + } + + @After + public void tearDown() { + if (client != null) { + client.close(); + } + } + + private StreamContinuationToken 
createStreamContinuationToken(@Nonnull String token) { + return StreamContinuationToken.newBuilder() + .setPartition( + StreamPartition.newBuilder() + .setRowRange( + RowRange.newBuilder() + .setStartKeyClosed(ByteString.copyFromUtf8(START_KEY_CLOSED)) + .setEndKeyOpen(ByteString.copyFromUtf8(END_KEY_OPEN)) + .build()) + .build()) + .setToken(token) + .build(); + } + + private ReadChangeStreamResponse.Heartbeat createHeartbeat( + StreamContinuationToken streamContinuationToken) { + return ReadChangeStreamResponse.Heartbeat.newBuilder() + .setContinuationToken(streamContinuationToken) + .setLowWatermark(Timestamp.newBuilder().setSeconds(1000).build()) + .build(); + } + + private ReadChangeStreamResponse.CloseStream createCloseStream() { + return ReadChangeStreamResponse.CloseStream.newBuilder() + .addContinuationTokens(createStreamContinuationToken(CLOSE_STREAM_TOKEN)) + .setStatus(com.google.rpc.Status.newBuilder().setCode(0).build()) + .build(); + } + + private ReadChangeStreamResponse.DataChange createDataChange(boolean done) { + Mutation deleteFromFamily = + Mutation.newBuilder() + .setDeleteFromFamily( + Mutation.DeleteFromFamily.newBuilder().setFamilyName("fake-family").build()) + .build(); + ReadChangeStreamResponse.DataChange.Builder dataChangeBuilder = + ReadChangeStreamResponse.DataChange.newBuilder() + .setType(ReadChangeStreamResponse.DataChange.Type.USER) + .setSourceClusterId("fake-source-cluster-id") + .setRowKey(ByteString.copyFromUtf8("key")) + .setCommitTimestamp(Timestamp.newBuilder().setSeconds(100).build()) + .setTiebreaker(100) + .addChunks( + ReadChangeStreamResponse.MutationChunk.newBuilder().setMutation(deleteFromFamily)); + if (done) { + dataChangeBuilder.setDone(true); + dataChangeBuilder.setLowWatermark(Timestamp.newBuilder().setSeconds(1).build()); + dataChangeBuilder.setToken(DATA_CHANGE_TOKEN); + } + return dataChangeBuilder.build(); + } + + // [{ReadChangeStreamResponse.Heartbeat}] -> [{Heartbeat}] + @Test + public void 
happyPathHeartbeatTest() { + ReadChangeStreamResponse heartbeatResponse = + ReadChangeStreamResponse.newBuilder() + .setHeartbeat(createHeartbeat(createStreamContinuationToken(HEARTBEAT_TOKEN))) + .build(); + service.expectations.add( + RpcExpectation.create().expectInitialRequest().respondWith(heartbeatResponse)); + List actualResults = getResults(); + Assert.assertEquals(actualResults.size(), 1); + Assert.assertTrue(actualResults.get(0) instanceof Heartbeat); + } + + // [{ReadChangeStreamResponse.CloseStream}] -> [{CloseStream}] + @Test + public void happyPathCloseStreamTest() { + ReadChangeStreamResponse closeStreamResponse = + ReadChangeStreamResponse.newBuilder().setCloseStream(createCloseStream()).build(); + service.expectations.add( + RpcExpectation.create().expectInitialRequest().respondWith(closeStreamResponse)); + List actualResults = getResults(); + Assert.assertEquals(actualResults.size(), 1); + Assert.assertTrue(actualResults.get(0) instanceof CloseStream); + } + + // [{DataChange(done==true)}] -> [{ReadChangeStreamMutation}] + @Test + public void happyPathCompleteDataChangeTest() { + // Setting `done==true` to complete the ChangeStreamMutation. 
+ ReadChangeStreamResponse dataChangeResponse = + ReadChangeStreamResponse.newBuilder().setDataChange(createDataChange(true)).build(); + service.expectations.add( + RpcExpectation.create().expectInitialRequest().respondWith(dataChangeResponse)); + List actualResults = getResults(); + Assert.assertEquals(actualResults.size(), 1); + Assert.assertTrue(actualResults.get(0) instanceof ChangeStreamMutation); + } + + // [{UNAVAILABLE}, {ReadChangeStreamResponse.Heartbeat}] -> [{Heartbeat}] + @Test + public void singleHeartbeatImmediateRetryTest() { + ReadChangeStreamResponse heartbeatResponse = + ReadChangeStreamResponse.newBuilder() + .setHeartbeat(createHeartbeat(createStreamContinuationToken(HEARTBEAT_TOKEN))) + .build(); + service.expectations.add( + RpcExpectation.create().expectInitialRequest().respondWithStatus(Code.UNAVAILABLE)); + // Resume with the exact same request. + service.expectations.add( + RpcExpectation.create().expectInitialRequest().respondWith(heartbeatResponse)); + List actualResults = getResults(); + Assert.assertEquals(actualResults.size(), 1); + Assert.assertTrue(actualResults.get(0) instanceof Heartbeat); + } + + // [{UNAVAILABLE}, {ReadChangeStreamResponse.CloseStream}] -> [{CloseStream}] + @Test + public void singleCloseStreamImmediateRetryTest() { + // CloseStream. + ReadChangeStreamResponse closeStreamResponse = + ReadChangeStreamResponse.newBuilder().setCloseStream(createCloseStream()).build(); + service.expectations.add( + RpcExpectation.create().expectInitialRequest().respondWithStatus(Code.UNAVAILABLE)); + // Resume with the exact same request. 
+ service.expectations.add( + RpcExpectation.create().expectInitialRequest().respondWith(closeStreamResponse)); + List actualResults = getResults(); + Assert.assertEquals(actualResults.size(), 1); + Assert.assertTrue(actualResults.get(0) instanceof CloseStream); + } + + // [{UNAVAILABLE}, {DataChange with done==true}] -> [{(ChangeStreamRecord) ChangeStreamMutation}] + @Test + public void singleCompleteDataChangeImmediateRetryTest() { + // DataChange + ReadChangeStreamResponse dataChangeResponse = + ReadChangeStreamResponse.newBuilder().setDataChange(createDataChange(true)).build(); + service.expectations.add( + RpcExpectation.create().expectInitialRequest().respondWithStatus(Code.UNAVAILABLE)); + // Resume with the exact same request. + service.expectations.add( + RpcExpectation.create().expectInitialRequest().respondWith(dataChangeResponse)); + List actualResults = getResults(); + Assert.assertEquals(actualResults.size(), 1); + Assert.assertTrue(actualResults.get(0) instanceof ChangeStreamMutation); + } + + // [{ReadChangeStreamResponse.Heartbeat}, {UNAVAILABLE}] -> Resume with token from heartbeat. + @Test + public void errorAfterHeartbeatShouldResumeWithTokenTest() { + StreamContinuationToken streamContinuationToken = + createStreamContinuationToken(HEARTBEAT_TOKEN); + ReadChangeStreamResponse heartbeatResponse = + ReadChangeStreamResponse.newBuilder() + .setHeartbeat(createHeartbeat(streamContinuationToken)) + .build(); + service.expectations.add( + RpcExpectation.create() + .expectInitialRequest() + .respondWith(heartbeatResponse) + .respondWithStatus(Code.UNAVAILABLE)); + // Resume the request with the token from the Heartbeat. `startTime` is cleared. + // We don't care about the response here so just do expectRequest. 
+ service.expectations.add( + RpcExpectation.create() + .expectRequest( + StreamContinuationTokens.newBuilder().addTokens(streamContinuationToken).build())); + List actualResults = getResults(); + // This is the Heartbeat we get before UNAVAILABLE. + Assert.assertEquals(actualResults.size(), 1); + Assert.assertTrue(actualResults.get(0) instanceof Heartbeat); + } + + // [{DataChange with done==true}, {UNAVAILABLE}] -> Resume with token from DataChange. + @Test + public void errorAfterDataChangeWithDoneShouldResumeWithTokenTest() { + // DataChange + ReadChangeStreamResponse dataChangeResponse = + ReadChangeStreamResponse.newBuilder().setDataChange(createDataChange(true)).build(); + service.expectations.add( + RpcExpectation.create() + .expectInitialRequest() + .respondWith(dataChangeResponse) + .respondWithStatus(Code.UNAVAILABLE)); + // Resume the request with the token from the ChangeStreamMutation. `startTime` is cleared. + // We don't care about the response here so just do expectRequest. + service.expectations.add( + RpcExpectation.create() + .expectRequest( + StreamContinuationTokens.newBuilder() + .addTokens(createStreamContinuationToken(DATA_CHANGE_TOKEN)) + .build())); + List actualResults = getResults(); + Assert.assertEquals(actualResults.size(), 1); + Assert.assertTrue(actualResults.get(0) instanceof ChangeStreamMutation); + } + + // [{DataChange with done==false}, {UNAVAILABLE}] -> Resume with original request. + @Test + public void errorAfterDataChangeWithoutDoneShouldResumeWithTokenTest() { + // DataChange + ReadChangeStreamResponse dataChangeResponse = + ReadChangeStreamResponse.newBuilder().setDataChange(createDataChange(false)).build(); + service.expectations.add( + RpcExpectation.create() + .expectInitialRequest() + .respondWith(dataChangeResponse) + .respondWithStatus(Code.UNAVAILABLE)); + // Resume the request with the original request, because the previous DataChange didn't + // complete the ChangeStreamMutation(i.e. without `done==true`). 
+ // We don't care about the response here so just do expectRequest. + service.expectations.add(RpcExpectation.create().expectInitialRequest()); + List actualResults = getResults(); + Truth.assertThat(actualResults).isEmpty(); + } + + // [{DataChange with done==true}, {Heartbeat}, {UNAVAILABLE}] -> Resume with token from Heartbeat. + @Test + public void shouldResumeWithLastTokenTest() { + // DataChange + ReadChangeStreamResponse dataChangeResponse = + ReadChangeStreamResponse.newBuilder().setDataChange(createDataChange(true)).build(); + // Heartbeat. + ReadChangeStreamResponse heartbeatResponse = + ReadChangeStreamResponse.newBuilder() + .setHeartbeat(createHeartbeat(createStreamContinuationToken(HEARTBEAT_TOKEN))) + .build(); + service.expectations.add( + RpcExpectation.create() + .expectInitialRequest() + .respondWith(dataChangeResponse) + .respondWith(heartbeatResponse) + .respondWithStatus(Code.UNAVAILABLE)); + // If we receive a DataChange with done==true and a Heartbeat then a retryable error, it should + // resume with the last token, which is the one from the heartbeat. + // If the original request reads with start_time, it'll be resumed with the continuation token. + // We don't care about the response here so just do expectRequest. 
+ service.expectations.add( + RpcExpectation.create() + .expectRequest( + StreamContinuationTokens.newBuilder() + .addTokens(createStreamContinuationToken(HEARTBEAT_TOKEN)) + .build())); + List actualResults = getResults(); + Assert.assertEquals(actualResults.size(), 2); + Assert.assertTrue(actualResults.get(0) instanceof ChangeStreamMutation); + Assert.assertTrue(actualResults.get(1) instanceof Heartbeat); + } + + @Test + public void retryRstStreamExceptionTest() { + ApiException exception = + new InternalException( + new StatusRuntimeException( + Status.INTERNAL.withDescription( + "INTERNAL: HTTP/2 error code: INTERNAL_ERROR\nReceived Rst Stream")), + GrpcStatusCode.of(Code.INTERNAL), + false); + ReadChangeStreamResponse heartbeatResponse = + ReadChangeStreamResponse.newBuilder() + .setHeartbeat(createHeartbeat(createStreamContinuationToken(HEARTBEAT_TOKEN))) + .build(); + service.expectations.add( + RpcExpectation.create() + .expectInitialRequest() + .respondWithException(Code.INTERNAL, exception)); + service.expectations.add( + RpcExpectation.create().expectInitialRequest().respondWith(heartbeatResponse)); + List actualResults = getResults(); + Assert.assertEquals(actualResults.size(), 1); + Assert.assertTrue(actualResults.get(0) instanceof Heartbeat); + } + + private List getResults() { + ReadChangeStreamQuery query = + ReadChangeStreamQuery.create(TABLE_ID).startTime(REQUEST_START_TIME); + // Always give it this partition. We don't care. 
+ ServerStream actualRecords = + client.readChangeStream(query.streamPartition(START_KEY_CLOSED, END_KEY_OPEN)); + List actualValues = Lists.newArrayList(); + for (ChangeStreamRecord record : actualRecords) { + actualValues.add(record); + } + return actualValues; + } + + private static class TestBigtableService extends BigtableGrpc.BigtableImplBase { + Queue expectations = Queues.newArrayDeque(); + int i = -1; + + @Override + public void readChangeStream( + ReadChangeStreamRequest request, + StreamObserver responseObserver) { + + RpcExpectation expectedRpc = expectations.poll(); + i++; + + Truth.assertWithMessage("Unexpected request#" + i + ":" + request.toString()) + .that(expectedRpc) + .isNotNull(); + Truth.assertWithMessage("Unexpected request#" + i) + .that(request) + .isEqualTo(expectedRpc.getExpectedRequest()); + + for (ReadChangeStreamResponse response : expectedRpc.responses) { + responseObserver.onNext(response); + } + if (expectedRpc.statusCode.toStatus().isOk()) { + responseObserver.onCompleted(); + } else if (expectedRpc.exception != null) { + responseObserver.onError(expectedRpc.exception); + } else { + responseObserver.onError(expectedRpc.statusCode.toStatus().asRuntimeException()); + } + } + } + + private static class RpcExpectation { + ReadChangeStreamRequest.Builder requestBuilder; + Status.Code statusCode; + ApiException exception; + List responses; + + private RpcExpectation() { + this.requestBuilder = + ReadChangeStreamRequest.newBuilder() + .setTableName(NameUtil.formatTableName(PROJECT_ID, INSTANCE_ID, TABLE_ID)) + .setPartition( + StreamPartition.newBuilder() + .setRowRange( + RowRange.newBuilder() + .setStartKeyClosed(ByteString.copyFromUtf8(START_KEY_CLOSED)) + .setEndKeyOpen(ByteString.copyFromUtf8(END_KEY_OPEN)) + .build()) + .build()); + this.statusCode = Status.Code.OK; + this.responses = Lists.newArrayList(); + } + + static RpcExpectation create() { + return new RpcExpectation(); + } + + RpcExpectation expectInitialRequest() { + 
requestBuilder.setStartTime(REQUEST_START_TIME); + return this; + } + + RpcExpectation expectRequest(StreamContinuationTokens continuationTokens) { + requestBuilder.setContinuationTokens(continuationTokens); + return this; + } + + RpcExpectation respondWithStatus(Status.Code code) { + this.statusCode = code; + return this; + } + + RpcExpectation respondWithException(Status.Code code, ApiException exception) { + this.statusCode = code; + this.exception = exception; + return this; + } + + RpcExpectation respondWith(ReadChangeStreamResponse... responses) { + Collections.addAll(this.responses, responses); + return this; + } + + ReadChangeStreamRequest getExpectedRequest() { + return requestBuilder.build(); + } + } +} From bb5c0c05157043683486f0bffd6c47b803247a60 Mon Sep 17 00:00:00 2001 From: tengzhonger <109308630+tengzhonger@users.noreply.github.com> Date: Thu, 11 Aug 2022 12:14:52 -0400 Subject: [PATCH 09/13] feat: Add toByteString/fromByteString for ChangeStreamContinuationToken (#1346) * feat: Add toByteString/fromByteString for ChangeStreamContinuationToken This will be used by the beam connector to write/read to a Bigtable table. This PR also does: 1. Revert the changes in https://github.com/googleapis/java-bigtable/pull/1345 since we can use Mockito to create mock objects for testing. 
* fix: Update comments * fix: Address comments * fix: Add InternalExtensionOnly annotations for Heartbeat/CloseStream/ChangeStreamMutation Co-authored-by: Teng Zhong --- .../models/ChangeStreamContinuationToken.java | 68 +++++++------ .../data/v2/models/ChangeStreamMutation.java | 31 +++--- .../bigtable/data/v2/models/CloseStream.java | 11 ++- .../bigtable/data/v2/models/Heartbeat.java | 6 +- .../ChangeStreamContinuationTokenTest.java | 99 +++++++++++++++++++ 5 files changed, 164 insertions(+), 51 deletions(-) create mode 100644 google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationTokenTest.java diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationToken.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationToken.java index bb5363a3a5..af7b15ea4e 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationToken.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationToken.java @@ -18,11 +18,12 @@ import com.google.api.core.InternalApi; import com.google.bigtable.v2.RowRange; import com.google.bigtable.v2.StreamContinuationToken; +import com.google.bigtable.v2.StreamPartition; +import com.google.cloud.bigtable.data.v2.models.Range.ByteStringRange; import com.google.common.base.MoreObjects; import com.google.common.base.Objects; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.ObjectOutputStream; +import com.google.protobuf.ByteString; +import com.google.protobuf.InvalidProtocolBufferException; import java.io.Serializable; import javax.annotation.Nonnull; @@ -30,48 +31,59 @@ public final class ChangeStreamContinuationToken implements Serializable { private static final long serialVersionUID = 524679926247095L; - private transient StreamContinuationToken.Builder builder; 
+ private final StreamContinuationToken tokenProto; - private ChangeStreamContinuationToken(@Nonnull StreamContinuationToken.Builder builder) { - this.builder = builder; + private ChangeStreamContinuationToken(@Nonnull StreamContinuationToken tokenProto) { + this.tokenProto = tokenProto; } - private void readObject(ObjectInputStream input) throws IOException, ClassNotFoundException { - input.defaultReadObject(); - builder = StreamContinuationToken.newBuilder().mergeFrom(input); - } - - private void writeObject(ObjectOutputStream output) throws IOException { - output.defaultWriteObject(); - builder.build().writeTo(output); + @InternalApi("Used in Changestream beam pipeline.") + public ChangeStreamContinuationToken( + @Nonnull ByteStringRange byteStringRange, @Nonnull String token) { + this.tokenProto = + StreamContinuationToken.newBuilder() + .setPartition( + StreamPartition.newBuilder() + .setRowRange( + RowRange.newBuilder() + .setStartKeyClosed(byteStringRange.getStart()) + .setEndKeyOpen(byteStringRange.getEnd()) + .build()) + .build()) + .setToken(token) + .build(); } + // TODO: Change this to return ByteStringRange. public RowRange getRowRange() { - return this.builder.getPartition().getRowRange(); + return this.tokenProto.getPartition().getRowRange(); } public String getToken() { - return this.builder.getToken(); + return this.tokenProto.getToken(); } - /** - * Creates the protobuf. This method is considered an internal implementation detail and not meant - * to be used by applications. - */ - @InternalApi("Used in Changestream beam pipeline.") - public StreamContinuationToken toProto() { - return builder.build(); + // Creates the protobuf. + StreamContinuationToken toProto() { + return tokenProto; } /** Wraps the protobuf {@link StreamContinuationToken}. 
*/ - @InternalApi("Used in Changestream beam pipeline.") - public static ChangeStreamContinuationToken fromProto( + static ChangeStreamContinuationToken fromProto( @Nonnull StreamContinuationToken streamContinuationToken) { - return new ChangeStreamContinuationToken(streamContinuationToken.toBuilder()); + return new ChangeStreamContinuationToken(streamContinuationToken); } - public ChangeStreamContinuationToken clone() { - return new ChangeStreamContinuationToken(this.builder.clone()); + @InternalApi("Used in Changestream beam pipeline.") + public ByteString toByteString() { + return tokenProto.toByteString(); + } + + @InternalApi("Used in Changestream beam pipeline.") + public static ChangeStreamContinuationToken fromByteString(ByteString byteString) + throws InvalidProtocolBufferException { + return new ChangeStreamContinuationToken( + StreamContinuationToken.newBuilder().mergeFrom(byteString).build()); } @Override diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java index f9107220b3..cfb8bb30b7 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java @@ -15,7 +15,7 @@ */ package com.google.cloud.bigtable.data.v2.models; -import com.google.api.core.InternalApi; +import com.google.api.core.InternalExtensionOnly; import com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type; import com.google.cloud.bigtable.data.v2.models.Range.TimestampRange; import com.google.cloud.bigtable.data.v2.stub.changestream.ChangeStreamRecordMerger; @@ -62,8 +62,11 @@ * builder.deleteCells(...); * ChangeStreamMutation changeStreamMutation = builder.setToken(...).setLowWatermark().build(); * } + * + * Make this class non-final so that we can create a 
subclass to mock it. */ -public final class ChangeStreamMutation implements ChangeStreamRecord, Serializable { +@InternalExtensionOnly("Used in Changestream beam pipeline testing.") +public class ChangeStreamMutation implements ChangeStreamRecord, Serializable { private static final long serialVersionUID = 8419520253162024218L; private final ByteString rowKey; @@ -100,8 +103,7 @@ private ChangeStreamMutation(Builder builder) { * ChangeStreamMutation because `token` and `loWatermark` must be set later when we finish * building the logical mutation. */ - @InternalApi("Used in Changestream beam pipeline.") - public static Builder createUserMutation( + static Builder createUserMutation( @Nonnull ByteString rowKey, @Nonnull String sourceClusterId, @Nonnull Timestamp commitTimestamp, @@ -114,8 +116,7 @@ public static Builder createUserMutation( * because `token` and `loWatermark` must be set later when we finish building the logical * mutation. */ - @InternalApi("Used in Changestream beam pipeline.") - public static Builder createGcMutation( + static Builder createGcMutation( @Nonnull ByteString rowKey, @Nonnull Timestamp commitTimestamp, int tieBreaker) { return new Builder(rowKey, Type.GARBAGE_COLLECTION, null, commitTimestamp, tieBreaker); } @@ -227,8 +228,7 @@ private Builder(ChangeStreamMutation changeStreamMutation) { this.lowWatermark = changeStreamMutation.lowWatermark; } - @InternalApi("Used in Changestream beam pipeline.") - public Builder setCell( + Builder setCell( @Nonnull String familyName, @Nonnull ByteString qualifier, long timestamp, @@ -237,8 +237,7 @@ public Builder setCell( return this; } - @InternalApi("Used in Changestream beam pipeline.") - public Builder deleteCells( + Builder deleteCells( @Nonnull String familyName, @Nonnull ByteString qualifier, @Nonnull TimestampRange timestampRange) { @@ -246,26 +245,22 @@ public Builder deleteCells( return this; } - @InternalApi("Used in Changestream beam pipeline.") - public Builder deleteFamily(@Nonnull 
String familyName) { + Builder deleteFamily(@Nonnull String familyName) { this.entries.add(DeleteFamily.create(familyName)); return this; } - @InternalApi("Used in Changestream beam pipeline.") - public Builder setToken(@Nonnull String token) { + Builder setToken(@Nonnull String token) { this.token = token; return this; } - @InternalApi("Used in Changestream beam pipeline.") - public Builder setLowWatermark(@Nonnull Timestamp lowWatermark) { + Builder setLowWatermark(@Nonnull Timestamp lowWatermark) { this.lowWatermark = lowWatermark; return this; } - @InternalApi("Used in Changestream beam pipeline.") - public ChangeStreamMutation build() { + ChangeStreamMutation build() { Preconditions.checkArgument( token != null && lowWatermark != null, "ChangeStreamMutation must have a continuation token and low watermark."); diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/CloseStream.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/CloseStream.java index e871c86697..346b0b60a7 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/CloseStream.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/CloseStream.java @@ -16,6 +16,7 @@ package com.google.cloud.bigtable.data.v2.models; import com.google.api.core.InternalApi; +import com.google.api.core.InternalExtensionOnly; import com.google.bigtable.v2.ReadChangeStreamResponse; import com.google.bigtable.v2.StreamContinuationToken; import com.google.common.base.MoreObjects; @@ -29,7 +30,12 @@ import java.util.List; import javax.annotation.Nonnull; -public final class CloseStream implements ChangeStreamRecord, Serializable { +/** + * A simple wrapper for {@link ReadChangeStreamResponse.CloseStream}. Make this class non-final so + * that we can create a subclass to mock it. 
+ */ +@InternalExtensionOnly("Used in Changestream beam pipeline testing.") +public class CloseStream implements ChangeStreamRecord, Serializable { private static final long serialVersionUID = 7316215828353608505L; private final Status status; private transient ImmutableList.Builder @@ -69,8 +75,7 @@ private void writeObject(ObjectOutputStream output) throws IOException { } /** Wraps the protobuf {@link ReadChangeStreamResponse.CloseStream}. */ - @InternalApi("Used in Changestream beam pipeline.") - public static CloseStream fromProto(@Nonnull ReadChangeStreamResponse.CloseStream closeStream) { + static CloseStream fromProto(@Nonnull ReadChangeStreamResponse.CloseStream closeStream) { return new CloseStream(closeStream.getStatus(), closeStream.getContinuationTokensList()); } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Heartbeat.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Heartbeat.java index 63c23525f3..40daa9d23a 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Heartbeat.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Heartbeat.java @@ -16,12 +16,15 @@ package com.google.cloud.bigtable.data.v2.models; import com.google.api.core.InternalApi; +import com.google.api.core.InternalExtensionOnly; import com.google.auto.value.AutoValue; import com.google.bigtable.v2.ReadChangeStreamResponse; import com.google.protobuf.Timestamp; import java.io.Serializable; import javax.annotation.Nonnull; +/** A simple wrapper for {@link ReadChangeStreamResponse.Heartbeat}. */ +@InternalExtensionOnly("Used in Changestream beam pipeline testing.") @AutoValue public abstract class Heartbeat implements ChangeStreamRecord, Serializable { private static final long serialVersionUID = 7316215828353608504L; @@ -32,8 +35,7 @@ private static Heartbeat create( } /** Wraps the protobuf {@link ReadChangeStreamResponse.Heartbeat}. 
*/ - @InternalApi("Used in Changestream beam pipeline.") - public static Heartbeat fromProto(@Nonnull ReadChangeStreamResponse.Heartbeat heartbeat) { + static Heartbeat fromProto(@Nonnull ReadChangeStreamResponse.Heartbeat heartbeat) { return create( ChangeStreamContinuationToken.fromProto(heartbeat.getContinuationToken()), heartbeat.getLowWatermark()); diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationTokenTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationTokenTest.java new file mode 100644 index 0000000000..e93dfc70bf --- /dev/null +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationTokenTest.java @@ -0,0 +1,99 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.models; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.bigtable.v2.RowRange; +import com.google.bigtable.v2.StreamContinuationToken; +import com.google.bigtable.v2.StreamPartition; +import com.google.cloud.bigtable.data.v2.models.Range.ByteStringRange; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ChangeStreamContinuationTokenTest { + + private final String TOKEN = "token"; + + private ByteStringRange createFakeByteStringRange() { + return ByteStringRange.create("a", "b"); + } + + // TODO: Get rid of this once we change ChangeStreamContinuationToken::getRowRange() + // to ChangeStreamContinuationToken::getByteStringRange(). + private RowRange rowRangeFromByteStringRange(ByteStringRange byteStringRange) { + return RowRange.newBuilder() + .setStartKeyClosed(byteStringRange.getStart()) + .setEndKeyOpen(byteStringRange.getEnd()) + .build(); + } + + @Test + public void basicTest() throws Exception { + ByteStringRange byteStringRange = createFakeByteStringRange(); + ChangeStreamContinuationToken changeStreamContinuationToken = + new ChangeStreamContinuationToken(byteStringRange, TOKEN); + Assert.assertEquals( + changeStreamContinuationToken.getRowRange(), rowRangeFromByteStringRange(byteStringRange)); + Assert.assertEquals(changeStreamContinuationToken.getToken(), TOKEN); + + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + ObjectOutputStream oos = new ObjectOutputStream(bos); + oos.writeObject(changeStreamContinuationToken); + oos.close(); + ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray())); + ChangeStreamContinuationToken actual = (ChangeStreamContinuationToken) ois.readObject(); + 
assertThat(actual).isEqualTo(changeStreamContinuationToken); + } + + @Test + public void fromProtoTest() { + ByteStringRange byteStringRange = createFakeByteStringRange(); + RowRange fakeRowRange = rowRangeFromByteStringRange(byteStringRange); + StreamContinuationToken proto = + StreamContinuationToken.newBuilder() + .setPartition(StreamPartition.newBuilder().setRowRange(fakeRowRange).build()) + .setToken(TOKEN) + .build(); + ChangeStreamContinuationToken changeStreamContinuationToken = + ChangeStreamContinuationToken.fromProto(proto); + Assert.assertEquals(changeStreamContinuationToken.getRowRange(), fakeRowRange); + Assert.assertEquals(changeStreamContinuationToken.getToken(), TOKEN); + Assert.assertEquals( + changeStreamContinuationToken, + ChangeStreamContinuationToken.fromProto(changeStreamContinuationToken.toProto())); + } + + @Test + public void toByteStringTest() throws Exception { + ByteStringRange byteStringRange = createFakeByteStringRange(); + ChangeStreamContinuationToken changeStreamContinuationToken = + new ChangeStreamContinuationToken(byteStringRange, TOKEN); + Assert.assertEquals( + changeStreamContinuationToken.getRowRange(), rowRangeFromByteStringRange(byteStringRange)); + Assert.assertEquals(changeStreamContinuationToken.getToken(), TOKEN); + Assert.assertEquals( + changeStreamContinuationToken, + ChangeStreamContinuationToken.fromByteString(changeStreamContinuationToken.toByteString())); + } +} From 2a4e7867a8d3a68bde1a116b090552ef85e73205 Mon Sep 17 00:00:00 2001 From: Tony Tang Date: Fri, 12 Aug 2022 11:35:25 -0400 Subject: [PATCH 10/13] feat!: rename ListChangeStreamPartitions to GenerateInitialChangeStreamPartitions (#1347) * Rename ListChangeStreamPartitions to GenerateInitialChangeStreamPartitions Change-Id: Id306df92de00e172ae900a9c4bf95de856edd90f * Fix formatting Change-Id: I5a45afa15b8b4b65a10fd17987f2d832f6924213 * Fix more formatting Change-Id: Ib7668abf8f61a5c939323c55dad14bc57501232e * Fix more formatting after rebase Change-Id: 
I88a545e8d34db9f5e675b6ef7409a9fbf3102d3d --- .../bigtable/data/v2/BigtableDataClient.java | 23 +- .../bigtable/data/v2/gapic_metadata.json | 6 + .../bigtable/data/v2/stub/BigtableStub.java | 11 +- .../data/v2/stub/BigtableStubSettings.java | 47 +- .../data/v2/stub/EnhancedBigtableStub.java | 71 +- .../v2/stub/EnhancedBigtableStubSettings.java | 32 +- .../data/v2/stub/GrpcBigtableStub.java | 52 +- ...alChangeStreamPartitionsUserCallable.java} | 28 +- .../data/v2/BigtableDataClientTests.java | 34 +- .../EnhancedBigtableStubSettingsTest.java | 2 +- ...angeStreamPartitionsUserCallableTest.java} | 31 +- .../com/google/bigtable/v2/BigtableGrpc.java | 99 +- .../com/google/bigtable/v2/BigtableProto.java | 284 +-- ...InitialChangeStreamPartitionsRequest.java} | 377 +-- ...angeStreamPartitionsRequestOrBuilder.java} | 16 +- ...nitialChangeStreamPartitionsResponse.java} | 269 ++- ...ngeStreamPartitionsResponseOrBuilder.java} | 4 +- .../bigtable/v2/ReadChangeStreamRequest.java | 452 ++-- .../v2/ReadChangeStreamRequestOrBuilder.java | 14 +- .../bigtable/v2/ReadChangeStreamResponse.java | 2093 ++++++++++------- .../v2/ReadChangeStreamResponseOrBuilder.java | 14 +- .../proto/google/bigtable/v2/bigtable.proto | 26 +- 22 files changed, 2250 insertions(+), 1735 deletions(-) rename google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/{ListChangeStreamPartitionsUserCallable.java => GenerateInitialChangeStreamPartitionsUserCallable.java} (73%) rename google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/{ListChangeStreamPartitionsUserCallableTest.java => GenerateInitialChangeStreamPartitionsUserCallableTest.java} (70%) rename proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/{ListChangeStreamPartitionsRequest.java => GenerateInitialChangeStreamPartitionsRequest.java} (62%) rename proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/{ListChangeStreamPartitionsRequestOrBuilder.java => 
GenerateInitialChangeStreamPartitionsRequestOrBuilder.java} (89%) rename proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/{ListChangeStreamPartitionsResponse.java => GenerateInitialChangeStreamPartitionsResponse.java} (64%) rename proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/{ListChangeStreamPartitionsResponseOrBuilder.java => GenerateInitialChangeStreamPartitionsResponseOrBuilder.java} (92%) diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java index 2de6e9b94a..acfbff0747 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java @@ -1503,7 +1503,7 @@ public UnaryCallable readModifyWriteRowCallable() { * String tableId = "[TABLE]"; * * try { - * ServerStream stream = bigtableDataClient.listChangeStreamPartitions(tableId); + * ServerStream stream = bigtableDataClient.generateInitialChangeStreamPartitions(tableId); * int count = 0; * * // Iterator style @@ -1525,8 +1525,8 @@ public UnaryCallable readModifyWriteRowCallable() { * @see ServerStreamingCallable For call styles. 
*/ @InternalApi("Used in Changestream beam pipeline.") - public ServerStream listChangeStreamPartitions(String tableId) { - return listChangeStreamPartitionsCallable().call(tableId); + public ServerStream generateInitialChangeStreamPartitions(String tableId) { + return generateInitialChangeStreamPartitionsCallable().call(tableId); } /** @@ -1538,7 +1538,7 @@ public ServerStream listChangeStreamPartitions(String tableId) { * try (BigtableDataClient bigtableDataClient = BigtableDataClient.create("[PROJECT]", "[INSTANCE]")) { * String tableId = "[TABLE]"; * - * bigtableDataClient.listChangeStreamPartitionsAsync(tableId, new ResponseObserver() { + * bigtableDataClient.generateInitialChangeStreamPartitionsAsync(tableId, new ResponseObserver() { * StreamController controller; * int count = 0; * @@ -1567,8 +1567,9 @@ public ServerStream listChangeStreamPartitions(String tableId) { * } */ @InternalApi("Used in Changestream beam pipeline.") - public void listChangeStreamPartitionsAsync(String tableId, ResponseObserver observer) { - listChangeStreamPartitionsCallable().call(tableId, observer); + public void generateInitialChangeStreamPartitionsAsync( + String tableId, ResponseObserver observer) { + generateInitialChangeStreamPartitionsCallable().call(tableId, observer); } /** @@ -1583,7 +1584,7 @@ public void listChangeStreamPartitionsAsync(String tableId, ResponseObserver partitions = bigtableDataClient.listChangeStreamPartitionsCallable().all().call(tableId); + * List partitions = bigtableDataClient.generateInitialChangeStreamPartitionsCallable().all().call(tableId); * } catch (NotFoundException e) { * System.out.println("Tried to read a non-existent table"); * } catch (RuntimeException e) { @@ -1603,7 +1604,7 @@ public void listChangeStreamPartitionsAsync(String tableId, ResponseObserver partitionFuture = - * bigtableDataClient.listChangeStreamPartitionsCallable().first().futureCall(tableId); + * 
bigtableDataClient.generateInitialChangeStreamPartitionsCallable().first().futureCall(tableId); * * ApiFutures.addCallback(partitionFuture, new ApiFutureCallback() { * public void onFailure(Throwable t) { @@ -1625,8 +1626,8 @@ public void listChangeStreamPartitionsAsync(String tableId, ResponseObserver listChangeStreamPartitionsCallable() { - return stub.listChangeStreamPartitionsCallable(); + public ServerStreamingCallable generateInitialChangeStreamPartitionsCallable() { + return stub.generateInitialChangeStreamPartitionsCallable(); } /** diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/gapic_metadata.json b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/gapic_metadata.json index 77b50a1f50..495762d219 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/gapic_metadata.json +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/gapic_metadata.json @@ -13,6 +13,9 @@ "CheckAndMutateRow": { "methods": ["checkAndMutateRow", "checkAndMutateRow", "checkAndMutateRow", "checkAndMutateRow", "checkAndMutateRow", "checkAndMutateRowCallable"] }, + "GenerateInitialChangeStreamPartitions": { + "methods": ["generateInitialChangeStreamPartitionsCallable"] + }, "MutateRow": { "methods": ["mutateRow", "mutateRow", "mutateRow", "mutateRow", "mutateRow", "mutateRowCallable"] }, @@ -22,6 +25,9 @@ "PingAndWarm": { "methods": ["pingAndWarm", "pingAndWarm", "pingAndWarm", "pingAndWarm", "pingAndWarm", "pingAndWarmCallable"] }, + "ReadChangeStream": { + "methods": ["readChangeStreamCallable"] + }, "ReadModifyWriteRow": { "methods": ["readModifyWriteRow", "readModifyWriteRow", "readModifyWriteRow", "readModifyWriteRow", "readModifyWriteRow", "readModifyWriteRowCallable"] }, diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStub.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStub.java index 
a3ab3f3951..7be629f8cf 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStub.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStub.java @@ -22,8 +22,8 @@ import com.google.api.gax.rpc.UnaryCallable; import com.google.bigtable.v2.CheckAndMutateRowRequest; import com.google.bigtable.v2.CheckAndMutateRowResponse; -import com.google.bigtable.v2.ListChangeStreamPartitionsRequest; -import com.google.bigtable.v2.ListChangeStreamPartitionsResponse; +import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest; +import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse; import com.google.bigtable.v2.MutateRowRequest; import com.google.bigtable.v2.MutateRowResponse; import com.google.bigtable.v2.MutateRowsRequest; @@ -78,10 +78,11 @@ public UnaryCallable pingAndWarmCallabl } public ServerStreamingCallable< - ListChangeStreamPartitionsRequest, ListChangeStreamPartitionsResponse> - listChangeStreamPartitionsCallable() { + GenerateInitialChangeStreamPartitionsRequest, + GenerateInitialChangeStreamPartitionsResponse> + generateInitialChangeStreamPartitionsCallable() { throw new UnsupportedOperationException( - "Not implemented: listChangeStreamPartitionsCallable()"); + "Not implemented: generateInitialChangeStreamPartitionsCallable()"); } public ServerStreamingCallable diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStubSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStubSettings.java index 5c77a08132..6f3143abca 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStubSettings.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStubSettings.java @@ -35,8 +35,8 @@ import com.google.api.gax.rpc.UnaryCallSettings; import com.google.bigtable.v2.CheckAndMutateRowRequest; import 
com.google.bigtable.v2.CheckAndMutateRowResponse; -import com.google.bigtable.v2.ListChangeStreamPartitionsRequest; -import com.google.bigtable.v2.ListChangeStreamPartitionsResponse; +import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest; +import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse; import com.google.bigtable.v2.MutateRowRequest; import com.google.bigtable.v2.MutateRowResponse; import com.google.bigtable.v2.MutateRowsRequest; @@ -88,8 +88,9 @@ public class BigtableStubSettings extends StubSettings { private final UnaryCallSettings readModifyWriteRowSettings; private final ServerStreamingCallSettings< - ListChangeStreamPartitionsRequest, ListChangeStreamPartitionsResponse> - listChangeStreamPartitionsSettings; + GenerateInitialChangeStreamPartitionsRequest, + GenerateInitialChangeStreamPartitionsResponse> + generateInitialChangeStreamPartitionsSettings; private final ServerStreamingCallSettings readChangeStreamSettings; @@ -131,11 +132,14 @@ public UnaryCallSettings pingAndWarmSet return readModifyWriteRowSettings; } - /** Returns the object with the settings used for calls to listChangeStreamPartitions. */ + /** + * Returns the object with the settings used for calls to generateInitialChangeStreamPartitions. + */ public ServerStreamingCallSettings< - ListChangeStreamPartitionsRequest, ListChangeStreamPartitionsResponse> - listChangeStreamPartitionsSettings() { - return listChangeStreamPartitionsSettings; + GenerateInitialChangeStreamPartitionsRequest, + GenerateInitialChangeStreamPartitionsResponse> + generateInitialChangeStreamPartitionsSettings() { + return generateInitialChangeStreamPartitionsSettings; } /** Returns the object with the settings used for calls to readChangeStream. 
*/ @@ -225,8 +229,8 @@ protected BigtableStubSettings(Builder settingsBuilder) throws IOException { checkAndMutateRowSettings = settingsBuilder.checkAndMutateRowSettings().build(); pingAndWarmSettings = settingsBuilder.pingAndWarmSettings().build(); readModifyWriteRowSettings = settingsBuilder.readModifyWriteRowSettings().build(); - listChangeStreamPartitionsSettings = - settingsBuilder.listChangeStreamPartitionsSettings().build(); + generateInitialChangeStreamPartitionsSettings = + settingsBuilder.generateInitialChangeStreamPartitionsSettings().build(); readChangeStreamSettings = settingsBuilder.readChangeStreamSettings().build(); } @@ -247,8 +251,9 @@ public static class Builder extends StubSettings.Builder readModifyWriteRowSettings; private final ServerStreamingCallSettings.Builder< - ListChangeStreamPartitionsRequest, ListChangeStreamPartitionsResponse> - listChangeStreamPartitionsSettings; + GenerateInitialChangeStreamPartitionsRequest, + GenerateInitialChangeStreamPartitionsResponse> + generateInitialChangeStreamPartitionsSettings; private final ServerStreamingCallSettings.Builder< ReadChangeStreamRequest, ReadChangeStreamResponse> readChangeStreamSettings; @@ -362,7 +367,7 @@ protected Builder(ClientContext clientContext) { checkAndMutateRowSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); pingAndWarmSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); readModifyWriteRowSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); - listChangeStreamPartitionsSettings = ServerStreamingCallSettings.newBuilder(); + generateInitialChangeStreamPartitionsSettings = ServerStreamingCallSettings.newBuilder(); readChangeStreamSettings = ServerStreamingCallSettings.newBuilder(); unaryMethodSettingsBuilders = @@ -384,7 +389,8 @@ protected Builder(BigtableStubSettings settings) { checkAndMutateRowSettings = settings.checkAndMutateRowSettings.toBuilder(); pingAndWarmSettings = settings.pingAndWarmSettings.toBuilder(); readModifyWriteRowSettings = 
settings.readModifyWriteRowSettings.toBuilder(); - listChangeStreamPartitionsSettings = settings.listChangeStreamPartitionsSettings.toBuilder(); + generateInitialChangeStreamPartitionsSettings = + settings.generateInitialChangeStreamPartitionsSettings.toBuilder(); readChangeStreamSettings = settings.readChangeStreamSettings.toBuilder(); unaryMethodSettingsBuilders = @@ -445,7 +451,7 @@ private static Builder initDefaults(Builder builder) { .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_0_params")); builder - .listChangeStreamPartitionsSettings() + .generateInitialChangeStreamPartitionsSettings() .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_5_codes")) .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_5_params")); @@ -513,11 +519,14 @@ public UnaryCallSettings.Builder mutateRowS return readModifyWriteRowSettings; } - /** Returns the builder for the settings used for calls to listChangeStreamPartitions. */ + /** + * Returns the builder for the settings used for calls to generateInitialChangeStreamPartitions. + */ public ServerStreamingCallSettings.Builder< - ListChangeStreamPartitionsRequest, ListChangeStreamPartitionsResponse> - listChangeStreamPartitionsSettings() { - return listChangeStreamPartitionsSettings; + GenerateInitialChangeStreamPartitionsRequest, + GenerateInitialChangeStreamPartitionsResponse> + generateInitialChangeStreamPartitionsSettings() { + return generateInitialChangeStreamPartitionsSettings; } /** Returns the builder for the settings used for calls to readChangeStream. 
*/ diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java index 6b5746fea2..4e29f2a3f5 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java @@ -47,8 +47,8 @@ import com.google.bigtable.v2.BigtableGrpc; import com.google.bigtable.v2.CheckAndMutateRowRequest; import com.google.bigtable.v2.CheckAndMutateRowResponse; -import com.google.bigtable.v2.ListChangeStreamPartitionsRequest; -import com.google.bigtable.v2.ListChangeStreamPartitionsResponse; +import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest; +import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse; import com.google.bigtable.v2.MutateRowRequest; import com.google.bigtable.v2.MutateRowResponse; import com.google.bigtable.v2.MutateRowsRequest; @@ -81,7 +81,7 @@ import com.google.cloud.bigtable.data.v2.models.RowMutation; import com.google.cloud.bigtable.data.v2.models.RowMutationEntry; import com.google.cloud.bigtable.data.v2.stub.changestream.ChangeStreamRecordMergingCallable; -import com.google.cloud.bigtable.data.v2.stub.changestream.ListChangeStreamPartitionsUserCallable; +import com.google.cloud.bigtable.data.v2.stub.changestream.GenerateInitialChangeStreamPartitionsUserCallable; import com.google.cloud.bigtable.data.v2.stub.changestream.ReadChangeStreamResumptionStrategy; import com.google.cloud.bigtable.data.v2.stub.changestream.ReadChangeStreamUserCallable; import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracerStreamingCallable; @@ -155,7 +155,8 @@ public class EnhancedBigtableStub implements AutoCloseable { private final UnaryCallable checkAndMutateRowCallable; private final UnaryCallable readModifyWriteRowCallable; - 
private final ServerStreamingCallable listChangeStreamPartitionsCallable; + private final ServerStreamingCallable + generateInitialChangeStreamPartitionsCallable; private final ServerStreamingCallable readChangeStreamCallable; @@ -302,7 +303,8 @@ public EnhancedBigtableStub(EnhancedBigtableStubSettings settings, ClientContext bulkMutateRowsCallable = createBulkMutateRowsCallable(); checkAndMutateRowCallable = createCheckAndMutateRowCallable(); readModifyWriteRowCallable = createReadModifyWriteRowCallable(); - listChangeStreamPartitionsCallable = createListChangeStreamPartitionsCallable(); + generateInitialChangeStreamPartitionsCallable = + createGenerateInitialChangeStreamPartitionsCallable(); readChangeStreamCallable = createReadChangeStreamCallable(new DefaultChangeStreamRecordAdapter()); } @@ -820,57 +822,68 @@ public Map extract(ReadModifyWriteRowRequest request) { } /** - * Creates a callable chain to handle streaming ListChangeStreamPartitions RPCs. The chain will: + * Creates a callable chain to handle streaming GenerateInitialChangeStreamPartitions RPCs. The + * chain will: * *

      *
    • Convert a String format tableId into a {@link - * com.google.bigtable.v2.ListChangeStreamPartitionsRequest} and dispatch the RPC. + * GenerateInitialChangeStreamPartitionsRequest} and dispatch the RPC. *
    • Upon receiving the response stream, it will convert the {@link - * com.google.bigtable.v2.ListChangeStreamPartitionsResponse}s into {@link RowRange}. + * com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse}s into {@link + * RowRange}. *
    */ - private ServerStreamingCallable createListChangeStreamPartitionsCallable() { - ServerStreamingCallable + private ServerStreamingCallable + createGenerateInitialChangeStreamPartitionsCallable() { + ServerStreamingCallable< + GenerateInitialChangeStreamPartitionsRequest, + GenerateInitialChangeStreamPartitionsResponse> base = GrpcRawCallableFactory.createServerStreamingCallable( GrpcCallSettings - . + . newBuilder() - .setMethodDescriptor(BigtableGrpc.getListChangeStreamPartitionsMethod()) + .setMethodDescriptor( + BigtableGrpc.getGenerateInitialChangeStreamPartitionsMethod()) .setParamsExtractor( - new RequestParamsExtractor() { + new RequestParamsExtractor() { @Override public Map extract( - ListChangeStreamPartitionsRequest listChangeStreamPartitionsRequest) { + GenerateInitialChangeStreamPartitionsRequest + generateInitialChangeStreamPartitionsRequest) { return ImmutableMap.of( "table_name", - listChangeStreamPartitionsRequest.getTableName(), + generateInitialChangeStreamPartitionsRequest.getTableName(), "app_profile_id", - listChangeStreamPartitionsRequest.getAppProfileId()); + generateInitialChangeStreamPartitionsRequest.getAppProfileId()); } }) .build(), - settings.listChangeStreamPartitionsSettings().getRetryableCodes()); + settings.generateInitialChangeStreamPartitionsSettings().getRetryableCodes()); ServerStreamingCallable userCallable = - new ListChangeStreamPartitionsUserCallable(base, requestContext); + new GenerateInitialChangeStreamPartitionsUserCallable(base, requestContext); ServerStreamingCallable withStatsHeaders = new StatsHeadersServerStreamingCallable<>(userCallable); - // Sometimes ListChangeStreamPartitions connections are disconnected via an RST frame. This - // error is transient and should be treated similar to UNAVAILABLE. However, this exception - // has an INTERNAL error code which by default is not retryable. Convert the exception so it - // can be retried in the client. 
+ // Sometimes GenerateInitialChangeStreamPartitions connections are disconnected via an RST + // frame. This error is transient and should be treated similar to UNAVAILABLE. However, this + // exception has an INTERNAL error code which by default is not retryable. Convert the exception + // so it can be retried in the client. ServerStreamingCallable convertException = new ConvertStreamExceptionCallable<>(withStatsHeaders); // Copy idle timeout settings for watchdog. ServerStreamingCallSettings innerSettings = ServerStreamingCallSettings.newBuilder() - .setRetryableCodes(settings.listChangeStreamPartitionsSettings().getRetryableCodes()) - .setRetrySettings(settings.listChangeStreamPartitionsSettings().getRetrySettings()) - .setIdleTimeout(settings.listChangeStreamPartitionsSettings().getIdleTimeout()) + .setRetryableCodes( + settings.generateInitialChangeStreamPartitionsSettings().getRetryableCodes()) + .setRetrySettings( + settings.generateInitialChangeStreamPartitionsSettings().getRetrySettings()) + .setIdleTimeout( + settings.generateInitialChangeStreamPartitionsSettings().getIdleTimeout()) .build(); ServerStreamingCallable watched = @@ -882,7 +895,7 @@ public Map extract( ServerStreamingCallable retrying = Callables.retrying(withBigtableTracer, innerSettings, clientContext); - SpanName span = getSpanName("ListChangeStreamPartitions"); + SpanName span = getSpanName("GenerateInitialChangeStreamPartitions"); ServerStreamingCallable traced = new TracedServerStreamingCallable<>(retrying, clientContext.getTracerFactory(), span); @@ -1025,9 +1038,9 @@ public UnaryCallable readModifyWriteRowCallable() { return readModifyWriteRowCallable; } - /** Returns a streaming list change stream partitions callable */ - public ServerStreamingCallable listChangeStreamPartitionsCallable() { - return listChangeStreamPartitionsCallable; + /** Returns a streaming generate initial change stream partitions callable */ + public ServerStreamingCallable 
generateInitialChangeStreamPartitionsCallable() { + return generateInitialChangeStreamPartitionsCallable; } /** Returns a streaming read change stream callable. */ diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java index 68769c9602..9d2a731018 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java @@ -140,10 +140,10 @@ public class EnhancedBigtableStubSettings extends StubSettings LIST_CHANGE_STREAM_PARTITIONS_RETRY_CODES = + private static final Set GENERATE_INITIAL_CHANGE_STREAM_PARTITIONS_RETRY_CODES = ImmutableSet.builder().addAll(IDEMPOTENT_RETRY_CODES).add(Code.ABORTED).build(); - private static final RetrySettings LIST_CHANGE_STREAM_PARTITIONS_RETRY_SETTINGS = + private static final RetrySettings GENERATE_INITIAL_CHANGE_STREAM_PARTITIONS_RETRY_SETTINGS = RetrySettings.newBuilder() .setInitialRetryDelay(Duration.ofMillis(10)) .setRetryDelayMultiplier(2.0) @@ -212,7 +212,8 @@ public class EnhancedBigtableStubSettings extends StubSettings checkAndMutateRowSettings; private final UnaryCallSettings readModifyWriteRowSettings; - private final ServerStreamingCallSettings listChangeStreamPartitionsSettings; + private final ServerStreamingCallSettings + generateInitialChangeStreamPartitionsSettings; private final ServerStreamingCallSettings readChangeStreamSettings; @@ -250,7 +251,8 @@ private EnhancedBigtableStubSettings(Builder builder) { bulkReadRowsSettings = builder.bulkReadRowsSettings.build(); checkAndMutateRowSettings = builder.checkAndMutateRowSettings.build(); readModifyWriteRowSettings = builder.readModifyWriteRowSettings.build(); - listChangeStreamPartitionsSettings = 
builder.listChangeStreamPartitionsSettings.build(); + generateInitialChangeStreamPartitionsSettings = + builder.generateInitialChangeStreamPartitionsSettings.build(); readChangeStreamSettings = builder.readChangeStreamSettings.build(); } @@ -535,8 +537,9 @@ public UnaryCallSettings readModifyWriteRowSettings() { return readModifyWriteRowSettings; } - public ServerStreamingCallSettings listChangeStreamPartitionsSettings() { - return listChangeStreamPartitionsSettings; + public ServerStreamingCallSettings + generateInitialChangeStreamPartitionsSettings() { + return generateInitialChangeStreamPartitionsSettings; } public ServerStreamingCallSettings @@ -569,7 +572,7 @@ public static class Builder extends StubSettings.Builder readModifyWriteRowSettings; private final ServerStreamingCallSettings.Builder - listChangeStreamPartitionsSettings; + generateInitialChangeStreamPartitionsSettings; private final ServerStreamingCallSettings.Builder readChangeStreamSettings; @@ -684,10 +687,10 @@ private Builder() { readModifyWriteRowSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); copyRetrySettings(baseDefaults.readModifyWriteRowSettings(), readModifyWriteRowSettings); - listChangeStreamPartitionsSettings = ServerStreamingCallSettings.newBuilder(); - listChangeStreamPartitionsSettings - .setRetryableCodes(LIST_CHANGE_STREAM_PARTITIONS_RETRY_CODES) - .setRetrySettings(LIST_CHANGE_STREAM_PARTITIONS_RETRY_SETTINGS) + generateInitialChangeStreamPartitionsSettings = ServerStreamingCallSettings.newBuilder(); + generateInitialChangeStreamPartitionsSettings + .setRetryableCodes(GENERATE_INITIAL_CHANGE_STREAM_PARTITIONS_RETRY_CODES) + .setRetrySettings(GENERATE_INITIAL_CHANGE_STREAM_PARTITIONS_RETRY_SETTINGS) .setIdleTimeout(Duration.ofMinutes(5)); readChangeStreamSettings = ServerStreamingCallSettings.newBuilder(); @@ -715,7 +718,8 @@ private Builder(EnhancedBigtableStubSettings settings) { bulkReadRowsSettings = settings.bulkReadRowsSettings.toBuilder(); 
checkAndMutateRowSettings = settings.checkAndMutateRowSettings.toBuilder(); readModifyWriteRowSettings = settings.readModifyWriteRowSettings.toBuilder(); - listChangeStreamPartitionsSettings = settings.listChangeStreamPartitionsSettings.toBuilder(); + generateInitialChangeStreamPartitionsSettings = + settings.generateInitialChangeStreamPartitionsSettings.toBuilder(); readChangeStreamSettings = settings.readChangeStreamSettings.toBuilder(); } // @@ -928,7 +932,9 @@ public String toString() { .add("bulkReadRowsSettings", bulkReadRowsSettings) .add("checkAndMutateRowSettings", checkAndMutateRowSettings) .add("readModifyWriteRowSettings", readModifyWriteRowSettings) - .add("listChangeStreamPartitionsSettings", listChangeStreamPartitionsSettings) + .add( + "generateInitialChangeStreamPartitionsSettings", + generateInitialChangeStreamPartitionsSettings) .add("readChangeStreamSettings", readChangeStreamSettings) .add("parent", super.toString()) .toString(); diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/GrpcBigtableStub.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/GrpcBigtableStub.java index b2c219bb3f..1a8ef37421 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/GrpcBigtableStub.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/GrpcBigtableStub.java @@ -28,8 +28,8 @@ import com.google.api.pathtemplate.PathTemplate; import com.google.bigtable.v2.CheckAndMutateRowRequest; import com.google.bigtable.v2.CheckAndMutateRowResponse; -import com.google.bigtable.v2.ListChangeStreamPartitionsRequest; -import com.google.bigtable.v2.ListChangeStreamPartitionsResponse; +import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest; +import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse; import com.google.bigtable.v2.MutateRowRequest; import com.google.bigtable.v2.MutateRowResponse; import 
com.google.bigtable.v2.MutateRowsRequest; @@ -128,16 +128,22 @@ public class GrpcBigtableStub extends BigtableStub { .build(); private static final MethodDescriptor< - ListChangeStreamPartitionsRequest, ListChangeStreamPartitionsResponse> - listChangeStreamPartitionsMethodDescriptor = + GenerateInitialChangeStreamPartitionsRequest, + GenerateInitialChangeStreamPartitionsResponse> + generateInitialChangeStreamPartitionsMethodDescriptor = MethodDescriptor - .newBuilder() + . + newBuilder() .setType(MethodDescriptor.MethodType.SERVER_STREAMING) - .setFullMethodName("google.bigtable.v2.Bigtable/ListChangeStreamPartitions") + .setFullMethodName( + "google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions") .setRequestMarshaller( - ProtoUtils.marshaller(ListChangeStreamPartitionsRequest.getDefaultInstance())) + ProtoUtils.marshaller( + GenerateInitialChangeStreamPartitionsRequest.getDefaultInstance())) .setResponseMarshaller( - ProtoUtils.marshaller(ListChangeStreamPartitionsResponse.getDefaultInstance())) + ProtoUtils.marshaller( + GenerateInitialChangeStreamPartitionsResponse.getDefaultInstance())) .build(); private static final MethodDescriptor @@ -162,8 +168,9 @@ public class GrpcBigtableStub extends BigtableStub { private final UnaryCallable readModifyWriteRowCallable; private final ServerStreamingCallable< - ListChangeStreamPartitionsRequest, ListChangeStreamPartitionsResponse> - listChangeStreamPartitionsCallable; + GenerateInitialChangeStreamPartitionsRequest, + GenerateInitialChangeStreamPartitionsResponse> + generateInitialChangeStreamPartitionsCallable; private final ServerStreamingCallable readChangeStreamCallable; @@ -332,11 +339,15 @@ protected GrpcBigtableStub( return builder.build(); }) .build(); - GrpcCallSettings - listChangeStreamPartitionsTransportSettings = + GrpcCallSettings< + GenerateInitialChangeStreamPartitionsRequest, + GenerateInitialChangeStreamPartitionsResponse> + generateInitialChangeStreamPartitionsTransportSettings = 
GrpcCallSettings - .newBuilder() - .setMethodDescriptor(listChangeStreamPartitionsMethodDescriptor) + . + newBuilder() + .setMethodDescriptor(generateInitialChangeStreamPartitionsMethodDescriptor) .setParamsExtractor( request -> { ImmutableMap.Builder params = ImmutableMap.builder(); @@ -381,10 +392,10 @@ protected GrpcBigtableStub( readModifyWriteRowTransportSettings, settings.readModifyWriteRowSettings(), clientContext); - this.listChangeStreamPartitionsCallable = + this.generateInitialChangeStreamPartitionsCallable = callableFactory.createServerStreamingCallable( - listChangeStreamPartitionsTransportSettings, - settings.listChangeStreamPartitionsSettings(), + generateInitialChangeStreamPartitionsTransportSettings, + settings.generateInitialChangeStreamPartitionsSettings(), clientContext); this.readChangeStreamCallable = callableFactory.createServerStreamingCallable( @@ -438,9 +449,10 @@ public UnaryCallable pingAndWarmCallabl @Override public ServerStreamingCallable< - ListChangeStreamPartitionsRequest, ListChangeStreamPartitionsResponse> - listChangeStreamPartitionsCallable() { - return listChangeStreamPartitionsCallable; + GenerateInitialChangeStreamPartitionsRequest, + GenerateInitialChangeStreamPartitionsResponse> + generateInitialChangeStreamPartitionsCallable() { + return generateInitialChangeStreamPartitionsCallable; } @Override diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ListChangeStreamPartitionsUserCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/GenerateInitialChangeStreamPartitionsUserCallable.java similarity index 73% rename from google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ListChangeStreamPartitionsUserCallable.java rename to google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/GenerateInitialChangeStreamPartitionsUserCallable.java index 1d3393bb2b..365cf56ff2 
100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/ListChangeStreamPartitionsUserCallable.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/GenerateInitialChangeStreamPartitionsUserCallable.java @@ -19,22 +19,28 @@ import com.google.api.gax.rpc.ResponseObserver; import com.google.api.gax.rpc.ServerStreamingCallable; import com.google.api.gax.rpc.StreamController; -import com.google.bigtable.v2.ListChangeStreamPartitionsRequest; -import com.google.bigtable.v2.ListChangeStreamPartitionsResponse; +import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest; +import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse; import com.google.bigtable.v2.RowRange; import com.google.cloud.bigtable.data.v2.internal.NameUtil; import com.google.cloud.bigtable.data.v2.internal.RequestContext; -/** Simple wrapper for ListChangeStreamPartitions to wrap the request and response protobufs. */ -public class ListChangeStreamPartitionsUserCallable +/** + * Simple wrapper for GenerateInitialChangeStreamPartitions to wrap the request and response + * protobufs. 
+ */ +public class GenerateInitialChangeStreamPartitionsUserCallable extends ServerStreamingCallable { private final RequestContext requestContext; private final ServerStreamingCallable< - ListChangeStreamPartitionsRequest, ListChangeStreamPartitionsResponse> + GenerateInitialChangeStreamPartitionsRequest, + GenerateInitialChangeStreamPartitionsResponse> inner; - public ListChangeStreamPartitionsUserCallable( - ServerStreamingCallable + public GenerateInitialChangeStreamPartitionsUserCallable( + ServerStreamingCallable< + GenerateInitialChangeStreamPartitionsRequest, + GenerateInitialChangeStreamPartitionsResponse> inner, RequestContext requestContext) { this.requestContext = requestContext; @@ -47,8 +53,8 @@ public void call( String tableName = NameUtil.formatTableName( requestContext.getProjectId(), requestContext.getInstanceId(), tableId); - ListChangeStreamPartitionsRequest request = - ListChangeStreamPartitionsRequest.newBuilder() + GenerateInitialChangeStreamPartitionsRequest request = + GenerateInitialChangeStreamPartitionsRequest.newBuilder() .setTableName(tableName) .setAppProfileId(requestContext.getAppProfileId()) .build(); @@ -57,7 +63,7 @@ public void call( } private class ConvertPartitionToRangeObserver - implements ResponseObserver { + implements ResponseObserver { private final ResponseObserver outerObserver; @@ -71,7 +77,7 @@ public void onStart(final StreamController controller) { } @Override - public void onResponse(ListChangeStreamPartitionsResponse response) { + public void onResponse(GenerateInitialChangeStreamPartitionsResponse response) { RowRange rowRange = RowRange.newBuilder() .setStartKeyClosed(response.getPartition().getRowRange().getStartKeyClosed()) diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientTests.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientTests.java index 648a298155..c3850e7e15 100644 --- 
a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientTests.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientTests.java @@ -83,7 +83,8 @@ public class BigtableDataClientTests { @Mock private Batcher mockBulkReadRowsBatcher; @Mock(answer = Answers.RETURNS_DEEP_STUBS) - private ServerStreamingCallable mockListChangeStreamPartitionsCallable; + private ServerStreamingCallable + mockGenerateInitialChangeStreamPartitionsCallable; @Mock(answer = Answers.RETURNS_DEEP_STUBS) private ServerStreamingCallable @@ -164,11 +165,11 @@ public void proxyReadRowsCallableTest() { } @Test - public void proxyListChangeStreamPartitionsCallableTest() { - Mockito.when(mockStub.listChangeStreamPartitionsCallable()) - .thenReturn(mockListChangeStreamPartitionsCallable); - assertThat(bigtableDataClient.listChangeStreamPartitionsCallable()) - .isSameInstanceAs(mockListChangeStreamPartitionsCallable); + public void proxyGenerateInitialChangeStreamPartitionsCallableTest() { + Mockito.when(mockStub.generateInitialChangeStreamPartitionsCallable()) + .thenReturn(mockGenerateInitialChangeStreamPartitionsCallable); + assertThat(bigtableDataClient.generateInitialChangeStreamPartitionsCallable()) + .isSameInstanceAs(mockGenerateInitialChangeStreamPartitionsCallable); } @Test @@ -326,25 +327,26 @@ public void proxyReadRowsAsyncTest() { } @Test - public void proxyListChangeStreamPartitionsSyncTest() { - Mockito.when(mockStub.listChangeStreamPartitionsCallable()) - .thenReturn(mockListChangeStreamPartitionsCallable); + public void proxyGenerateInitialChangeStreamPartitionsSyncTest() { + Mockito.when(mockStub.generateInitialChangeStreamPartitionsCallable()) + .thenReturn(mockGenerateInitialChangeStreamPartitionsCallable); - bigtableDataClient.listChangeStreamPartitions("fake-table"); + bigtableDataClient.generateInitialChangeStreamPartitions("fake-table"); - 
Mockito.verify(mockListChangeStreamPartitionsCallable).call("fake-table"); + Mockito.verify(mockGenerateInitialChangeStreamPartitionsCallable).call("fake-table"); } @Test - public void proxyListChangeStreamPartitionsAsyncTest() { - Mockito.when(mockStub.listChangeStreamPartitionsCallable()) - .thenReturn(mockListChangeStreamPartitionsCallable); + public void proxyGenerateInitialChangeStreamPartitionsAsyncTest() { + Mockito.when(mockStub.generateInitialChangeStreamPartitionsCallable()) + .thenReturn(mockGenerateInitialChangeStreamPartitionsCallable); @SuppressWarnings("unchecked") ResponseObserver mockObserver = Mockito.mock(ResponseObserver.class); - bigtableDataClient.listChangeStreamPartitionsAsync("fake-table", mockObserver); + bigtableDataClient.generateInitialChangeStreamPartitionsAsync("fake-table", mockObserver); - Mockito.verify(mockListChangeStreamPartitionsCallable).call("fake-table", mockObserver); + Mockito.verify(mockGenerateInitialChangeStreamPartitionsCallable) + .call("fake-table", mockObserver); } @Test diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java index fa2efbf7e0..731ba7f77e 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java @@ -701,7 +701,7 @@ public void isRefreshingChannelFalseValueTest() { "bulkReadRowsSettings", "checkAndMutateRowSettings", "readModifyWriteRowSettings", - "listChangeStreamPartitionsSettings", + "generateInitialChangeStreamPartitionsSettings", "readChangeStreamSettings", }; diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ListChangeStreamPartitionsUserCallableTest.java 
b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/GenerateInitialChangeStreamPartitionsUserCallableTest.java similarity index 70% rename from google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ListChangeStreamPartitionsUserCallableTest.java rename to google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/GenerateInitialChangeStreamPartitionsUserCallableTest.java index 03db35f8d6..908961be77 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ListChangeStreamPartitionsUserCallableTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/GenerateInitialChangeStreamPartitionsUserCallableTest.java @@ -17,8 +17,8 @@ import static com.google.common.truth.Truth.assertThat; -import com.google.bigtable.v2.ListChangeStreamPartitionsRequest; -import com.google.bigtable.v2.ListChangeStreamPartitionsResponse; +import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest; +import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse; import com.google.bigtable.v2.RowRange; import com.google.bigtable.v2.StreamPartition; import com.google.cloud.bigtable.data.v2.internal.NameUtil; @@ -33,22 +33,24 @@ import org.junit.runners.JUnit4; @RunWith(JUnit4.class) -public class ListChangeStreamPartitionsUserCallableTest { +public class GenerateInitialChangeStreamPartitionsUserCallableTest { private final RequestContext requestContext = RequestContext.create("my-project", "my-instance", "my-profile"); @Test public void requestIsCorrect() { FakeStreamingApi.ServerStreamingStashCallable< - ListChangeStreamPartitionsRequest, ListChangeStreamPartitionsResponse> + GenerateInitialChangeStreamPartitionsRequest, + GenerateInitialChangeStreamPartitionsResponse> inner = new FakeStreamingApi.ServerStreamingStashCallable<>(Lists.newArrayList()); - 
ListChangeStreamPartitionsUserCallable listChangeStreamPartitionsUserCallable = - new ListChangeStreamPartitionsUserCallable(inner, requestContext); + GenerateInitialChangeStreamPartitionsUserCallable + generateInitialChangeStreamPartitionsUserCallable = + new GenerateInitialChangeStreamPartitionsUserCallable(inner, requestContext); - listChangeStreamPartitionsUserCallable.all().call("my-table"); + generateInitialChangeStreamPartitionsUserCallable.all().call("my-table"); assertThat(inner.getActualRequest()) .isEqualTo( - ListChangeStreamPartitionsRequest.newBuilder() + GenerateInitialChangeStreamPartitionsRequest.newBuilder() .setTableName( NameUtil.formatTableName( requestContext.getProjectId(), requestContext.getInstanceId(), "my-table")) @@ -59,11 +61,12 @@ public void requestIsCorrect() { @Test public void responseIsConverted() { FakeStreamingApi.ServerStreamingStashCallable< - ListChangeStreamPartitionsRequest, ListChangeStreamPartitionsResponse> + GenerateInitialChangeStreamPartitionsRequest, + GenerateInitialChangeStreamPartitionsResponse> inner = new FakeStreamingApi.ServerStreamingStashCallable<>( Lists.newArrayList( - ListChangeStreamPartitionsResponse.newBuilder() + GenerateInitialChangeStreamPartitionsResponse.newBuilder() .setPartition( StreamPartition.newBuilder() .setRowRange( @@ -73,10 +76,12 @@ public void responseIsConverted() { .build()) .build()) .build())); - ListChangeStreamPartitionsUserCallable listChangeStreamPartitionsUserCallable = - new ListChangeStreamPartitionsUserCallable(inner, requestContext); + GenerateInitialChangeStreamPartitionsUserCallable + generateInitialChangeStreamPartitionsUserCallable = + new GenerateInitialChangeStreamPartitionsUserCallable(inner, requestContext); - List results = listChangeStreamPartitionsUserCallable.all().call("my-table"); + List results = + generateInitialChangeStreamPartitionsUserCallable.all().call("my-table"); Truth.assertThat(results) .containsExactly( RowRange.newBuilder() diff --git 
a/grpc-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/BigtableGrpc.java b/grpc-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/BigtableGrpc.java index f1cfa5c841..8ad1bae368 100644 --- a/grpc-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/BigtableGrpc.java +++ b/grpc-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/BigtableGrpc.java @@ -335,53 +335,57 @@ private BigtableGrpc() {} } private static volatile io.grpc.MethodDescriptor< - com.google.bigtable.v2.ListChangeStreamPartitionsRequest, - com.google.bigtable.v2.ListChangeStreamPartitionsResponse> - getListChangeStreamPartitionsMethod; + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest, + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse> + getGenerateInitialChangeStreamPartitionsMethod; @io.grpc.stub.annotations.RpcMethod( - fullMethodName = SERVICE_NAME + '/' + "ListChangeStreamPartitions", - requestType = com.google.bigtable.v2.ListChangeStreamPartitionsRequest.class, - responseType = com.google.bigtable.v2.ListChangeStreamPartitionsResponse.class, + fullMethodName = SERVICE_NAME + '/' + "GenerateInitialChangeStreamPartitions", + requestType = com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest.class, + responseType = com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse.class, methodType = io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) public static io.grpc.MethodDescriptor< - com.google.bigtable.v2.ListChangeStreamPartitionsRequest, - com.google.bigtable.v2.ListChangeStreamPartitionsResponse> - getListChangeStreamPartitionsMethod() { + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest, + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse> + getGenerateInitialChangeStreamPartitionsMethod() { io.grpc.MethodDescriptor< - com.google.bigtable.v2.ListChangeStreamPartitionsRequest, - com.google.bigtable.v2.ListChangeStreamPartitionsResponse> - 
getListChangeStreamPartitionsMethod; - if ((getListChangeStreamPartitionsMethod = BigtableGrpc.getListChangeStreamPartitionsMethod) + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest, + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse> + getGenerateInitialChangeStreamPartitionsMethod; + if ((getGenerateInitialChangeStreamPartitionsMethod = + BigtableGrpc.getGenerateInitialChangeStreamPartitionsMethod) == null) { synchronized (BigtableGrpc.class) { - if ((getListChangeStreamPartitionsMethod = BigtableGrpc.getListChangeStreamPartitionsMethod) + if ((getGenerateInitialChangeStreamPartitionsMethod = + BigtableGrpc.getGenerateInitialChangeStreamPartitionsMethod) == null) { - BigtableGrpc.getListChangeStreamPartitionsMethod = - getListChangeStreamPartitionsMethod = + BigtableGrpc.getGenerateInitialChangeStreamPartitionsMethod = + getGenerateInitialChangeStreamPartitionsMethod = io.grpc.MethodDescriptor - . + . newBuilder() .setType(io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) .setFullMethodName( - generateFullMethodName(SERVICE_NAME, "ListChangeStreamPartitions")) + generateFullMethodName( + SERVICE_NAME, "GenerateInitialChangeStreamPartitions")) .setSampledToLocalTracing(true) .setRequestMarshaller( io.grpc.protobuf.ProtoUtils.marshaller( - com.google.bigtable.v2.ListChangeStreamPartitionsRequest + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest .getDefaultInstance())) .setResponseMarshaller( io.grpc.protobuf.ProtoUtils.marshaller( - com.google.bigtable.v2.ListChangeStreamPartitionsResponse + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse .getDefaultInstance())) .setSchemaDescriptor( - new BigtableMethodDescriptorSupplier("ListChangeStreamPartitions")) + new BigtableMethodDescriptorSupplier( + "GenerateInitialChangeStreamPartitions")) .build(); } } } - return getListChangeStreamPartitionsMethod; + return getGenerateInitialChangeStreamPartitionsMethod; } private static volatile 
io.grpc.MethodDescriptor< @@ -600,12 +604,13 @@ public void readModifyWriteRow( * Partitions can be read with `ReadChangeStream`. * */ - public void listChangeStreamPartitions( - com.google.bigtable.v2.ListChangeStreamPartitionsRequest request, - io.grpc.stub.StreamObserver + public void generateInitialChangeStreamPartitions( + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest request, + io.grpc.stub.StreamObserver< + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse> responseObserver) { io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( - getListChangeStreamPartitionsMethod(), responseObserver); + getGenerateInitialChangeStreamPartitionsMethod(), responseObserver); } /** @@ -675,12 +680,12 @@ public final io.grpc.ServerServiceDefinition bindService() { com.google.bigtable.v2.ReadModifyWriteRowResponse>( this, METHODID_READ_MODIFY_WRITE_ROW))) .addMethod( - getListChangeStreamPartitionsMethod(), + getGenerateInitialChangeStreamPartitionsMethod(), io.grpc.stub.ServerCalls.asyncServerStreamingCall( new MethodHandlers< - com.google.bigtable.v2.ListChangeStreamPartitionsRequest, - com.google.bigtable.v2.ListChangeStreamPartitionsResponse>( - this, METHODID_LIST_CHANGE_STREAM_PARTITIONS))) + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest, + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse>( + this, METHODID_GENERATE_INITIAL_CHANGE_STREAM_PARTITIONS))) .addMethod( getReadChangeStreamMethod(), io.grpc.stub.ServerCalls.asyncServerStreamingCall( @@ -843,12 +848,13 @@ public void readModifyWriteRow( * Partitions can be read with `ReadChangeStream`. 
* */ - public void listChangeStreamPartitions( - com.google.bigtable.v2.ListChangeStreamPartitionsRequest request, - io.grpc.stub.StreamObserver + public void generateInitialChangeStreamPartitions( + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest request, + io.grpc.stub.StreamObserver< + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse> responseObserver) { io.grpc.stub.ClientCalls.asyncServerStreamingCall( - getChannel().newCall(getListChangeStreamPartitionsMethod(), getCallOptions()), + getChannel().newCall(getGenerateInitialChangeStreamPartitionsMethod(), getCallOptions()), request, responseObserver); } @@ -1008,11 +1014,14 @@ public com.google.bigtable.v2.ReadModifyWriteRowResponse readModifyWriteRow( * Partitions can be read with `ReadChangeStream`. * */ - public java.util.Iterator - listChangeStreamPartitions( - com.google.bigtable.v2.ListChangeStreamPartitionsRequest request) { + public java.util.Iterator + generateInitialChangeStreamPartitions( + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest request) { return io.grpc.stub.ClientCalls.blockingServerStreamingCall( - getChannel(), getListChangeStreamPartitionsMethod(), getCallOptions(), request); + getChannel(), + getGenerateInitialChangeStreamPartitionsMethod(), + getCallOptions(), + request); } /** @@ -1120,7 +1129,7 @@ protected BigtableFutureStub build(io.grpc.Channel channel, io.grpc.CallOptions private static final int METHODID_CHECK_AND_MUTATE_ROW = 4; private static final int METHODID_PING_AND_WARM = 5; private static final int METHODID_READ_MODIFY_WRITE_ROW = 6; - private static final int METHODID_LIST_CHANGE_STREAM_PARTITIONS = 7; + private static final int METHODID_GENERATE_INITIAL_CHANGE_STREAM_PARTITIONS = 7; private static final int METHODID_READ_CHANGE_STREAM = 8; private static final class MethodHandlers @@ -1182,11 +1191,11 @@ public void invoke(Req request, io.grpc.stub.StreamObserver responseObserv (io.grpc.stub.StreamObserver) 
responseObserver); break; - case METHODID_LIST_CHANGE_STREAM_PARTITIONS: - serviceImpl.listChangeStreamPartitions( - (com.google.bigtable.v2.ListChangeStreamPartitionsRequest) request, + case METHODID_GENERATE_INITIAL_CHANGE_STREAM_PARTITIONS: + serviceImpl.generateInitialChangeStreamPartitions( + (com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest) request, (io.grpc.stub.StreamObserver< - com.google.bigtable.v2.ListChangeStreamPartitionsResponse>) + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse>) responseObserver); break; case METHODID_READ_CHANGE_STREAM: @@ -1264,7 +1273,7 @@ public static io.grpc.ServiceDescriptor getServiceDescriptor() { .addMethod(getCheckAndMutateRowMethod()) .addMethod(getPingAndWarmMethod()) .addMethod(getReadModifyWriteRowMethod()) - .addMethod(getListChangeStreamPartitionsMethod()) + .addMethod(getGenerateInitialChangeStreamPartitionsMethod()) .addMethod(getReadChangeStreamMethod()) .build(); } diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/BigtableProto.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/BigtableProto.java index e37fe2f8bb..5de4d1ecd5 100644 --- a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/BigtableProto.java +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/BigtableProto.java @@ -96,13 +96,13 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_bigtable_v2_ReadModifyWriteRowResponse_fieldAccessorTable; static final com.google.protobuf.Descriptors.Descriptor - internal_static_google_bigtable_v2_ListChangeStreamPartitionsRequest_descriptor; + internal_static_google_bigtable_v2_GenerateInitialChangeStreamPartitionsRequest_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - 
internal_static_google_bigtable_v2_ListChangeStreamPartitionsRequest_fieldAccessorTable; + internal_static_google_bigtable_v2_GenerateInitialChangeStreamPartitionsRequest_fieldAccessorTable; static final com.google.protobuf.Descriptors.Descriptor - internal_static_google_bigtable_v2_ListChangeStreamPartitionsResponse_descriptor; + internal_static_google_bigtable_v2_GenerateInitialChangeStreamPartitionsResponse_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_google_bigtable_v2_ListChangeStreamPartitionsResponse_fieldAccessorTable; + internal_static_google_bigtable_v2_GenerateInitialChangeStreamPartitionsResponse_fieldAccessorTable; static final com.google.protobuf.Descriptors.Descriptor internal_static_google_bigtable_v2_ReadChangeStreamRequest_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable @@ -202,141 +202,143 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "\002\022;\n\005rules\030\003 \003(\0132\'.google.bigtable.v2.Re" + "adModifyWriteRuleB\003\340A\002\"B\n\032ReadModifyWrit" + "eRowResponse\022$\n\003row\030\001 \001(\0132\027.google.bigta" - + "ble.v2.Row\"{\n!ListChangeStreamPartitions" - + "Request\022>\n\ntable_name\030\001 \001(\tB*\340A\002\372A$\n\"big" - + "tableadmin.googleapis.com/Table\022\026\n\016app_p" - + "rofile_id\030\002 \001(\t\"\\\n\"ListChangeStreamParti" - + "tionsResponse\0226\n\tpartition\030\001 \001(\0132#.googl" - + "e.bigtable.v2.StreamPartition\"\233\003\n\027ReadCh" - + "angeStreamRequest\022>\n\ntable_name\030\001 \001(\tB*\340" - + "A\002\372A$\n\"bigtableadmin.googleapis.com/Tabl" - + "e\022\026\n\016app_profile_id\030\002 \001(\t\0226\n\tpartition\030\003" - + " \001(\0132#.google.bigtable.v2.StreamPartitio" - + "n\0220\n\nstart_time\030\004 \001(\0132\032.google.protobuf." 
- + "TimestampH\000\022K\n\023continuation_tokens\030\006 \001(\013" - + "2,.google.bigtable.v2.StreamContinuation" - + "TokensH\000\022,\n\010end_time\030\005 \001(\0132\032.google.prot" - + "obuf.Timestamp\0225\n\022heartbeat_duration\030\007 \001" - + "(\0132\031.google.protobuf.DurationB\014\n\nstart_f" - + "rom\"\327\t\n\030ReadChangeStreamResponse\022N\n\013data" - + "_change\030\001 \001(\01327.google.bigtable.v2.ReadC" - + "hangeStreamResponse.DataChangeH\000\022K\n\thear" - + "tbeat\030\002 \001(\01326.google.bigtable.v2.ReadCha" - + "ngeStreamResponse.HeartbeatH\000\022P\n\014close_s" - + "tream\030\003 \001(\01328.google.bigtable.v2.ReadCha" - + "ngeStreamResponse.CloseStreamH\000\032\364\001\n\rMuta" - + "tionChunk\022X\n\nchunk_info\030\001 \001(\0132D.google.b" - + "igtable.v2.ReadChangeStreamResponse.Muta" - + "tionChunk.ChunkInfo\022.\n\010mutation\030\002 \001(\0132\034." - + "google.bigtable.v2.Mutation\032Y\n\tChunkInfo" - + "\022\032\n\022chunked_value_size\030\001 \001(\005\022\034\n\024chunked_" - + "value_offset\030\002 \001(\005\022\022\n\nlast_chunk\030\003 \001(\010\032\274" - + "\003\n\nDataChange\022J\n\004type\030\001 \001(\0162<.google.big" - + "table.v2.ReadChangeStreamResponse.DataCh" - + "ange.Type\022\031\n\021source_cluster_id\030\002 \001(\t\022\017\n\007" - + "row_key\030\003 \001(\014\0224\n\020commit_timestamp\030\004 \001(\0132" - + "\032.google.protobuf.Timestamp\022\022\n\ntiebreake" - + "r\030\005 \001(\005\022J\n\006chunks\030\006 \003(\0132:.google.bigtabl" - + "e.v2.ReadChangeStreamResponse.MutationCh" - + "unk\022\014\n\004done\030\010 \001(\010\022\r\n\005token\030\t \001(\t\0221\n\rlow_" - + "watermark\030\n \001(\0132\032.google.protobuf.Timest" - + "amp\"P\n\004Type\022\024\n\020TYPE_UNSPECIFIED\020\000\022\010\n\004USE" - + "R\020\001\022\026\n\022GARBAGE_COLLECTION\020\002\022\020\n\014CONTINUAT" - + "ION\020\003\032\207\001\n\tHeartbeat\022G\n\022continuation_toke" - + "n\030\001 
\001(\0132+.google.bigtable.v2.StreamConti" - + "nuationToken\0221\n\rlow_watermark\030\002 \001(\0132\032.go" - + "ogle.protobuf.Timestamp\032{\n\013CloseStream\022\"" - + "\n\006status\030\001 \001(\0132\022.google.rpc.Status\022H\n\023co" - + "ntinuation_tokens\030\002 \003(\0132+.google.bigtabl" - + "e.v2.StreamContinuationTokenB\017\n\rstream_r" - + "ecord2\252\030\n\010Bigtable\022\233\002\n\010ReadRows\022#.google" - + ".bigtable.v2.ReadRowsRequest\032$.google.bi" - + "gtable.v2.ReadRowsResponse\"\301\001\202\323\344\223\002>\"9/v2" - + "/{table_name=projects/*/instances/*/tabl" - + "es/*}:readRows:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{" - + "table_name=projects/*/instances/*/tables" - + "/*}\022\020\n\016app_profile_id\332A\ntable_name\332A\031tab" - + "le_name,app_profile_id0\001\022\254\002\n\rSampleRowKe" - + "ys\022(.google.bigtable.v2.SampleRowKeysReq" - + "uest\032).google.bigtable.v2.SampleRowKeysR" - + "esponse\"\303\001\202\323\344\223\002@\022>/v2/{table_name=projec" - + "ts/*/instances/*/tables/*}:sampleRowKeys" + + "ble.v2.Row\"\206\001\n,GenerateInitialChangeStre" + + "amPartitionsRequest\022>\n\ntable_name\030\001 \001(\tB" + + "*\340A\002\372A$\n\"bigtableadmin.googleapis.com/Ta" + + "ble\022\026\n\016app_profile_id\030\002 \001(\t\"g\n-GenerateI" + + "nitialChangeStreamPartitionsResponse\0226\n\t" + + "partition\030\001 \001(\0132#.google.bigtable.v2.Str" + + "eamPartition\"\233\003\n\027ReadChangeStreamRequest" + + "\022>\n\ntable_name\030\001 \001(\tB*\340A\002\372A$\n\"bigtablead" + + "min.googleapis.com/Table\022\026\n\016app_profile_" + + "id\030\002 \001(\t\0226\n\tpartition\030\003 \001(\0132#.google.big" + + "table.v2.StreamPartition\0220\n\nstart_time\030\004" + + " \001(\0132\032.google.protobuf.TimestampH\000\022K\n\023co" + + "ntinuation_tokens\030\006 \001(\0132,.google.bigtabl" + + "e.v2.StreamContinuationTokensH\000\022,\n\010end_t" + + "ime\030\005 \001(\0132\032.google.protobuf.Timestamp\0225\n" + + 
"\022heartbeat_duration\030\007 \001(\0132\031.google.proto" + + "buf.DurationB\014\n\nstart_from\"\327\t\n\030ReadChang" + + "eStreamResponse\022N\n\013data_change\030\001 \001(\01327.g" + + "oogle.bigtable.v2.ReadChangeStreamRespon" + + "se.DataChangeH\000\022K\n\theartbeat\030\002 \001(\01326.goo" + + "gle.bigtable.v2.ReadChangeStreamResponse" + + ".HeartbeatH\000\022P\n\014close_stream\030\003 \001(\01328.goo" + + "gle.bigtable.v2.ReadChangeStreamResponse" + + ".CloseStreamH\000\032\364\001\n\rMutationChunk\022X\n\nchun" + + "k_info\030\001 \001(\0132D.google.bigtable.v2.ReadCh" + + "angeStreamResponse.MutationChunk.ChunkIn" + + "fo\022.\n\010mutation\030\002 \001(\0132\034.google.bigtable.v" + + "2.Mutation\032Y\n\tChunkInfo\022\032\n\022chunked_value" + + "_size\030\001 \001(\005\022\034\n\024chunked_value_offset\030\002 \001(" + + "\005\022\022\n\nlast_chunk\030\003 \001(\010\032\274\003\n\nDataChange\022J\n\004" + + "type\030\001 \001(\0162<.google.bigtable.v2.ReadChan" + + "geStreamResponse.DataChange.Type\022\031\n\021sour" + + "ce_cluster_id\030\002 \001(\t\022\017\n\007row_key\030\003 \001(\014\0224\n\020" + + "commit_timestamp\030\004 \001(\0132\032.google.protobuf" + + ".Timestamp\022\022\n\ntiebreaker\030\005 \001(\005\022J\n\006chunks" + + "\030\006 \003(\0132:.google.bigtable.v2.ReadChangeSt" + + "reamResponse.MutationChunk\022\014\n\004done\030\010 \001(\010" + + "\022\r\n\005token\030\t \001(\t\0221\n\rlow_watermark\030\n \001(\0132\032" + + ".google.protobuf.Timestamp\"P\n\004Type\022\024\n\020TY" + + "PE_UNSPECIFIED\020\000\022\010\n\004USER\020\001\022\026\n\022GARBAGE_CO" + + "LLECTION\020\002\022\020\n\014CONTINUATION\020\003\032\207\001\n\tHeartbe" + + "at\022G\n\022continuation_token\030\001 \001(\0132+.google." + + "bigtable.v2.StreamContinuationToken\0221\n\rl" + + "ow_watermark\030\002 \001(\0132\032.google.protobuf.Tim" + + "estamp\032{\n\013CloseStream\022\"\n\006status\030\001 \001(\0132\022." 
+ + "google.rpc.Status\022H\n\023continuation_tokens" + + "\030\002 \003(\0132+.google.bigtable.v2.StreamContin" + + "uationTokenB\017\n\rstream_record2\327\030\n\010Bigtabl" + + "e\022\233\002\n\010ReadRows\022#.google.bigtable.v2.Read" + + "RowsRequest\032$.google.bigtable.v2.ReadRow" + + "sResponse\"\301\001\202\323\344\223\002>\"9/v2/{table_name=proj" + + "ects/*/instances/*/tables/*}:readRows:\001*" + "\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projec" + "ts/*/instances/*/tables/*}\022\020\n\016app_profil" + "e_id\332A\ntable_name\332A\031table_name,app_profi" - + "le_id0\001\022\301\002\n\tMutateRow\022$.google.bigtable." - + "v2.MutateRowRequest\032%.google.bigtable.v2" - + ".MutateRowResponse\"\346\001\202\323\344\223\002?\":/v2/{table_" - + "name=projects/*/instances/*/tables/*}:mu" - + "tateRow:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_n" - + "ame=projects/*/instances/*/tables/*}\022\020\n\016" - + "app_profile_id\332A\034table_name,row_key,muta" - + "tions\332A+table_name,row_key,mutations,app" - + "_profile_id\022\263\002\n\nMutateRows\022%.google.bigt" - + "able.v2.MutateRowsRequest\032&.google.bigta" - + "ble.v2.MutateRowsResponse\"\323\001\202\323\344\223\002@\";/v2/" - + "{table_name=projects/*/instances/*/table" - + "s/*}:mutateRows:\001*\212\323\344\223\002N\022:\n\ntable_name\022," - + "{table_name=projects/*/instances/*/table" - + "s/*}\022\020\n\016app_profile_id\332A\022table_name,entr" - + "ies\332A!table_name,entries,app_profile_id0" - + "\001\022\255\003\n\021CheckAndMutateRow\022,.google.bigtabl" - + "e.v2.CheckAndMutateRowRequest\032-.google.b" - + "igtable.v2.CheckAndMutateRowResponse\"\272\002\202" - + "\323\344\223\002G\"B/v2/{table_name=projects/*/instan" - + "ces/*/tables/*}:checkAndMutateRow:\001*\212\323\344\223" - + "\002N\022:\n\ntable_name\022,{table_name=projects/*" - + "/instances/*/tables/*}\022\020\n\016app_profile_id" - + "\332ABtable_name,row_key,predicate_filter,t" - + 
"rue_mutations,false_mutations\332AQtable_na" - + "me,row_key,predicate_filter,true_mutatio" - + "ns,false_mutations,app_profile_id\022\356\001\n\013Pi" - + "ngAndWarm\022&.google.bigtable.v2.PingAndWa" - + "rmRequest\032\'.google.bigtable.v2.PingAndWa" - + "rmResponse\"\215\001\202\323\344\223\002+\"&/v2/{name=projects/" - + "*/instances/*}:ping:\001*\212\323\344\223\0029\022%\n\004name\022\035{n" - + "ame=projects/*/instances/*}\022\020\n\016app_profi" - + "le_id\332A\004name\332A\023name,app_profile_id\022\335\002\n\022R" - + "eadModifyWriteRow\022-.google.bigtable.v2.R" - + "eadModifyWriteRowRequest\032..google.bigtab" - + "le.v2.ReadModifyWriteRowResponse\"\347\001\202\323\344\223\002" - + "H\"C/v2/{table_name=projects/*/instances/" - + "*/tables/*}:readModifyWriteRow:\001*\212\323\344\223\002N\022" + + "le_id0\001\022\254\002\n\rSampleRowKeys\022(.google.bigta" + + "ble.v2.SampleRowKeysRequest\032).google.big" + + "table.v2.SampleRowKeysResponse\"\303\001\202\323\344\223\002@\022" + + ">/v2/{table_name=projects/*/instances/*/" + + "tables/*}:sampleRowKeys\212\323\344\223\002N\022:\n\ntable_n" + + "ame\022,{table_name=projects/*/instances/*/" + + "tables/*}\022\020\n\016app_profile_id\332A\ntable_name" + + "\332A\031table_name,app_profile_id0\001\022\301\002\n\tMutat" + + "eRow\022$.google.bigtable.v2.MutateRowReque" + + "st\032%.google.bigtable.v2.MutateRowRespons" + + "e\"\346\001\202\323\344\223\002?\":/v2/{table_name=projects/*/i" + + "nstances/*/tables/*}:mutateRow:\001*\212\323\344\223\002N\022" + ":\n\ntable_name\022,{table_name=projects/*/in" - + "stances/*/tables/*}\022\020\n\016app_profile_id\332A\030" - + "table_name,row_key,rules\332A\'table_name,ro" - + "w_key,rules,app_profile_id\022\216\002\n\032ListChang" - + "eStreamPartitions\0225.google.bigtable.v2.L" - + "istChangeStreamPartitionsRequest\0326.googl" - + "e.bigtable.v2.ListChangeStreamPartitions" - + "Response\"\177\202\323\344\223\002P\"K/v2/{table_name=projec" - + 
"ts/*/instances/*/tables/*}:listChangeStr" - + "eamPartitions:\001*\332A\ntable_name\332A\031table_na" - + "me,app_profile_id0\001\022\346\001\n\020ReadChangeStream" - + "\022+.google.bigtable.v2.ReadChangeStreamRe" - + "quest\032,.google.bigtable.v2.ReadChangeStr" - + "eamResponse\"u\202\323\344\223\002F\"A/v2/{table_name=pro" - + "jects/*/instances/*/tables/*}:readChange" - + "Stream:\001*\332A\ntable_name\332A\031table_name,app_" - + "profile_id0\001\032\333\002\312A\027bigtable.googleapis.co" - + "m\322A\275\002https://www.googleapis.com/auth/big" - + "table.data,https://www.googleapis.com/au" - + "th/bigtable.data.readonly,https://www.go" - + "ogleapis.com/auth/cloud-bigtable.data,ht" - + "tps://www.googleapis.com/auth/cloud-bigt" - + "able.data.readonly,https://www.googleapi" - + "s.com/auth/cloud-platform,https://www.go" - + "ogleapis.com/auth/cloud-platform.read-on" - + "lyB\353\002\n\026com.google.bigtable.v2B\rBigtableP" - + "rotoP\001Z:google.golang.org/genproto/googl" - + "eapis/bigtable/v2;bigtable\252\002\030Google.Clou" - + "d.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2" - + "\352\002\033Google::Cloud::Bigtable::V2\352A\\\n\"bigta" - + "bleadmin.googleapis.com/Table\0226projects/" - + "{project}/instances/{instance}/tables/{t" - + "able}\352AP\n%bigtableadmin.googleapis.com/I" - + "nstance\022\'projects/{project}/instances/{i" - + "nstance}b\006proto3" + + "stances/*/tables/*}\022\020\n\016app_profile_id\332A\034" + + "table_name,row_key,mutations\332A+table_nam" + + "e,row_key,mutations,app_profile_id\022\263\002\n\nM" + + "utateRows\022%.google.bigtable.v2.MutateRow" + + "sRequest\032&.google.bigtable.v2.MutateRows" + + "Response\"\323\001\202\323\344\223\002@\";/v2/{table_name=proje" + + "cts/*/instances/*/tables/*}:mutateRows:\001" + + "*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=proje" + + "cts/*/instances/*/tables/*}\022\020\n\016app_profi" + + "le_id\332A\022table_name,entries\332A!table_name," + + 
"entries,app_profile_id0\001\022\255\003\n\021CheckAndMut" + + "ateRow\022,.google.bigtable.v2.CheckAndMuta" + + "teRowRequest\032-.google.bigtable.v2.CheckA" + + "ndMutateRowResponse\"\272\002\202\323\344\223\002G\"B/v2/{table" + + "_name=projects/*/instances/*/tables/*}:c" + + "heckAndMutateRow:\001*\212\323\344\223\002N\022:\n\ntable_name\022" + + ",{table_name=projects/*/instances/*/tabl" + + "es/*}\022\020\n\016app_profile_id\332ABtable_name,row" + + "_key,predicate_filter,true_mutations,fal" + + "se_mutations\332AQtable_name,row_key,predic" + + "ate_filter,true_mutations,false_mutation" + + "s,app_profile_id\022\356\001\n\013PingAndWarm\022&.googl" + + "e.bigtable.v2.PingAndWarmRequest\032\'.googl" + + "e.bigtable.v2.PingAndWarmResponse\"\215\001\202\323\344\223" + + "\002+\"&/v2/{name=projects/*/instances/*}:pi" + + "ng:\001*\212\323\344\223\0029\022%\n\004name\022\035{name=projects/*/in" + + "stances/*}\022\020\n\016app_profile_id\332A\004name\332A\023na" + + "me,app_profile_id\022\335\002\n\022ReadModifyWriteRow" + + "\022-.google.bigtable.v2.ReadModifyWriteRow" + + "Request\032..google.bigtable.v2.ReadModifyW" + + "riteRowResponse\"\347\001\202\323\344\223\002H\"C/v2/{table_nam" + + "e=projects/*/instances/*/tables/*}:readM" + + "odifyWriteRow:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{t" + + "able_name=projects/*/instances/*/tables/" + + "*}\022\020\n\016app_profile_id\332A\030table_name,row_ke" + + "y,rules\332A\'table_name,row_key,rules,app_p" + + "rofile_id\022\273\002\n%GenerateInitialChangeStrea" + + "mPartitions\022@.google.bigtable.v2.Generat" + + "eInitialChangeStreamPartitionsRequest\032A." 
+ + "google.bigtable.v2.GenerateInitialChange" + + "StreamPartitionsResponse\"\212\001\202\323\344\223\002[\"V/v2/{" + + "table_name=projects/*/instances/*/tables" + + "/*}:generateInitialChangeStreamPartition" + + "s:\001*\332A\ntable_name\332A\031table_name,app_profi" + + "le_id0\001\022\346\001\n\020ReadChangeStream\022+.google.bi" + + "gtable.v2.ReadChangeStreamRequest\032,.goog" + + "le.bigtable.v2.ReadChangeStreamResponse\"" + + "u\202\323\344\223\002F\"A/v2/{table_name=projects/*/inst" + + "ances/*/tables/*}:readChangeStream:\001*\332A\n" + + "table_name\332A\031table_name,app_profile_id0\001" + + "\032\333\002\312A\027bigtable.googleapis.com\322A\275\002https:/" + + "/www.googleapis.com/auth/bigtable.data,h" + + "ttps://www.googleapis.com/auth/bigtable." + + "data.readonly,https://www.googleapis.com" + + "/auth/cloud-bigtable.data,https://www.go" + + "ogleapis.com/auth/cloud-bigtable.data.re" + + "adonly,https://www.googleapis.com/auth/c" + + "loud-platform,https://www.googleapis.com" + + "/auth/cloud-platform.read-onlyB\353\002\n\026com.g" + + "oogle.bigtable.v2B\rBigtableProtoP\001Z:goog" + + "le.golang.org/genproto/googleapis/bigtab" + + "le/v2;bigtable\252\002\030Google.Cloud.Bigtable.V" + + "2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::C" + + "loud::Bigtable::V2\352A\\\n\"bigtableadmin.goo" + + "gleapis.com/Table\0226projects/{project}/in" + + "stances/{instance}/tables/{table}\352AP\n%bi" + + "gtableadmin.googleapis.com/Instance\022\'pro" + + "jects/{project}/instances/{instance}b\006pr" + + "oto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -499,19 +501,19 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new java.lang.String[] { "Row", }); - internal_static_google_bigtable_v2_ListChangeStreamPartitionsRequest_descriptor = + internal_static_google_bigtable_v2_GenerateInitialChangeStreamPartitionsRequest_descriptor = 
getDescriptor().getMessageTypes().get(14); - internal_static_google_bigtable_v2_ListChangeStreamPartitionsRequest_fieldAccessorTable = + internal_static_google_bigtable_v2_GenerateInitialChangeStreamPartitionsRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_google_bigtable_v2_ListChangeStreamPartitionsRequest_descriptor, + internal_static_google_bigtable_v2_GenerateInitialChangeStreamPartitionsRequest_descriptor, new java.lang.String[] { "TableName", "AppProfileId", }); - internal_static_google_bigtable_v2_ListChangeStreamPartitionsResponse_descriptor = + internal_static_google_bigtable_v2_GenerateInitialChangeStreamPartitionsResponse_descriptor = getDescriptor().getMessageTypes().get(15); - internal_static_google_bigtable_v2_ListChangeStreamPartitionsResponse_fieldAccessorTable = + internal_static_google_bigtable_v2_GenerateInitialChangeStreamPartitionsResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_google_bigtable_v2_ListChangeStreamPartitionsResponse_descriptor, + internal_static_google_bigtable_v2_GenerateInitialChangeStreamPartitionsResponse_descriptor, new java.lang.String[] { "Partition", }); diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsRequest.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/GenerateInitialChangeStreamPartitionsRequest.java similarity index 62% rename from proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsRequest.java rename to proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/GenerateInitialChangeStreamPartitionsRequest.java index 38beeb41dd..b426e994c4 100644 --- a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsRequest.java +++ 
b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/GenerateInitialChangeStreamPartitionsRequest.java @@ -23,45 +23,46 @@ * *
      * NOTE: This API is not generally available. Users must be allowlisted.
    - * Request message for Bigtable.ListChangeStreamPartitions.
    + * Request message for Bigtable.GenerateInitialChangeStreamPartitions.
      * 
    * - * Protobuf type {@code google.bigtable.v2.ListChangeStreamPartitionsRequest} + * Protobuf type {@code google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest} */ -public final class ListChangeStreamPartitionsRequest extends com.google.protobuf.GeneratedMessageV3 +public final class GenerateInitialChangeStreamPartitionsRequest + extends com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:google.bigtable.v2.ListChangeStreamPartitionsRequest) - ListChangeStreamPartitionsRequestOrBuilder { + // @@protoc_insertion_point(message_implements:google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest) + GenerateInitialChangeStreamPartitionsRequestOrBuilder { private static final long serialVersionUID = 0L; - // Use ListChangeStreamPartitionsRequest.newBuilder() to construct. - private ListChangeStreamPartitionsRequest( + // Use GenerateInitialChangeStreamPartitionsRequest.newBuilder() to construct. + private GenerateInitialChangeStreamPartitionsRequest( com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } - private ListChangeStreamPartitionsRequest() { + private GenerateInitialChangeStreamPartitionsRequest() { tableName_ = ""; appProfileId_ = ""; } - @Override + @java.lang.Override @SuppressWarnings({"unused"}) - protected Object newInstance(UnusedPrivateParameter unused) { - return new ListChangeStreamPartitionsRequest(); + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new GenerateInitialChangeStreamPartitionsRequest(); } - @Override + @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } - private ListChangeStreamPartitionsRequest( + private GenerateInitialChangeStreamPartitionsRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == 
null) { - throw new NullPointerException(); + throw new java.lang.NullPointerException(); } com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -75,14 +76,14 @@ private ListChangeStreamPartitionsRequest( break; case 10: { - String s = input.readStringRequireUtf8(); + java.lang.String s = input.readStringRequireUtf8(); tableName_ = s; break; } case 18: { - String s = input.readStringRequireUtf8(); + java.lang.String s = input.readStringRequireUtf8(); appProfileId_ = s; break; @@ -109,25 +110,28 @@ private ListChangeStreamPartitionsRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return BigtableProto - .internal_static_google_bigtable_v2_ListChangeStreamPartitionsRequest_descriptor; + return com.google.bigtable.v2.BigtableProto + .internal_static_google_bigtable_v2_GenerateInitialChangeStreamPartitionsRequest_descriptor; } - @Override - protected FieldAccessorTable internalGetFieldAccessorTable() { - return BigtableProto - .internal_static_google_bigtable_v2_ListChangeStreamPartitionsRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized(ListChangeStreamPartitionsRequest.class, Builder.class); + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.v2.BigtableProto + .internal_static_google_bigtable_v2_GenerateInitialChangeStreamPartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest.class, + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest.Builder.class); } public static final int TABLE_NAME_FIELD_NUMBER = 1; - private volatile Object tableName_; + private volatile java.lang.Object tableName_; /** * * *
    -   * Required. The unique name of the table from which to get change stream
    -   * partitions. Values are of the form
    +   * Required. The unique name of the table from which to get change stream partitions.
    +   * Values are of the form
        * `projects/<project>/instances/<instance>/tables/<table>`.
        * Change streaming must be enabled on the table.
        * 
    @@ -138,14 +142,14 @@ protected FieldAccessorTable internalGetFieldAccessorTable() { * * @return The tableName. */ - @Override - public String getTableName() { - Object ref = tableName_; - if (ref instanceof String) { - return (String) ref; + @java.lang.Override + public java.lang.String getTableName() { + java.lang.Object ref = tableName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); + java.lang.String s = bs.toStringUtf8(); tableName_ = s; return s; } @@ -154,8 +158,8 @@ public String getTableName() { * * *
    -   * Required. The unique name of the table from which to get change stream
    -   * partitions. Values are of the form
    +   * Required. The unique name of the table from which to get change stream partitions.
    +   * Values are of the form
        * `projects/<project>/instances/<instance>/tables/<table>`.
        * Change streaming must be enabled on the table.
        * 
    @@ -166,11 +170,12 @@ public String getTableName() { * * @return The bytes for tableName. */ - @Override + @java.lang.Override public com.google.protobuf.ByteString getTableNameBytes() { - Object ref = tableName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); + java.lang.Object ref = tableName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); tableName_ = b; return b; } else { @@ -179,7 +184,7 @@ public com.google.protobuf.ByteString getTableNameBytes() { } public static final int APP_PROFILE_ID_FIELD_NUMBER = 2; - private volatile Object appProfileId_; + private volatile java.lang.Object appProfileId_; /** * * @@ -193,14 +198,14 @@ public com.google.protobuf.ByteString getTableNameBytes() { * * @return The appProfileId. */ - @Override - public String getAppProfileId() { - Object ref = appProfileId_; - if (ref instanceof String) { - return (String) ref; + @java.lang.Override + public java.lang.String getAppProfileId() { + java.lang.Object ref = appProfileId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); + java.lang.String s = bs.toStringUtf8(); appProfileId_ = s; return s; } @@ -218,11 +223,12 @@ public String getAppProfileId() { * * @return The bytes for appProfileId. 
*/ - @Override + @java.lang.Override public com.google.protobuf.ByteString getAppProfileIdBytes() { - Object ref = appProfileId_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); + java.lang.Object ref = appProfileId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); appProfileId_ = b; return b; } else { @@ -232,7 +238,7 @@ public com.google.protobuf.ByteString getAppProfileIdBytes() { private byte memoizedIsInitialized = -1; - @Override + @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; @@ -242,7 +248,7 @@ public final boolean isInitialized() { return true; } - @Override + @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(tableName_)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 1, tableName_); @@ -253,7 +259,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io unknownFields.writeTo(output); } - @Override + @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; @@ -270,15 +276,16 @@ public int getSerializedSize() { return size; } - @Override - public boolean equals(final Object obj) { + @java.lang.Override + public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof ListChangeStreamPartitionsRequest)) { + if (!(obj instanceof com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest)) { return super.equals(obj); } - ListChangeStreamPartitionsRequest other = (ListChangeStreamPartitionsRequest) obj; + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest other = + 
(com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest) obj; if (!getTableName().equals(other.getTableName())) return false; if (!getAppProfileId().equals(other.getAppProfileId())) return false; @@ -286,7 +293,7 @@ public boolean equals(final Object obj) { return true; } - @Override + @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; @@ -302,70 +309,72 @@ public int hashCode() { return hash; } - public static ListChangeStreamPartitionsRequest parseFrom(java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static ListChangeStreamPartitionsRequest parseFrom( + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static ListChangeStreamPartitionsRequest parseFrom(com.google.protobuf.ByteString data) + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest parseFrom( + com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static ListChangeStreamPartitionsRequest parseFrom( + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static ListChangeStreamPartitionsRequest parseFrom(byte[] data) - throws 
com.google.protobuf.InvalidProtocolBufferException { + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static ListChangeStreamPartitionsRequest parseFrom( + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static ListChangeStreamPartitionsRequest parseFrom(java.io.InputStream input) - throws java.io.IOException { + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } - public static ListChangeStreamPartitionsRequest parseFrom( + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } - public static ListChangeStreamPartitionsRequest parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); } - public static ListChangeStreamPartitionsRequest parseDelimitedFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest + 
parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( PARSER, input, extensionRegistry); } - public static ListChangeStreamPartitionsRequest parseFrom( + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } - public static ListChangeStreamPartitionsRequest parseFrom( + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -373,7 +382,7 @@ public static ListChangeStreamPartitionsRequest parseFrom( PARSER, input, extensionRegistry); } - @Override + @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } @@ -382,17 +391,18 @@ public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - public static Builder newBuilder(ListChangeStreamPartitionsRequest prototype) { + public static Builder newBuilder( + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } - @Override + @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } - @Override - protected Builder newBuilderForType(BuilderParent parent) { + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -401,33 +411,37 @@ protected Builder newBuilderForType(BuilderParent parent) { * *
        * NOTE: This API is not generally available. Users must be allowlisted.
    -   * Request message for Bigtable.ListChangeStreamPartitions.
    +   * Request message for Bigtable.GenerateInitialChangeStreamPartitions.
        * 
    * - * Protobuf type {@code google.bigtable.v2.ListChangeStreamPartitionsRequest} + * Protobuf type {@code google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:google.bigtable.v2.ListChangeStreamPartitionsRequest) - ListChangeStreamPartitionsRequestOrBuilder { + // @@protoc_insertion_point(builder_implements:google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest) + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return BigtableProto - .internal_static_google_bigtable_v2_ListChangeStreamPartitionsRequest_descriptor; + return com.google.bigtable.v2.BigtableProto + .internal_static_google_bigtable_v2_GenerateInitialChangeStreamPartitionsRequest_descriptor; } - @Override - protected FieldAccessorTable internalGetFieldAccessorTable() { - return BigtableProto - .internal_static_google_bigtable_v2_ListChangeStreamPartitionsRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized(ListChangeStreamPartitionsRequest.class, Builder.class); + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.v2.BigtableProto + .internal_static_google_bigtable_v2_GenerateInitialChangeStreamPartitionsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest.class, + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest.Builder.class); } - // Construct using com.google.bigtable.v2.ListChangeStreamPartitionsRequest.newBuilder() + // Construct using + // com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - private 
Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -436,7 +450,7 @@ private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} } - @Override + @java.lang.Override public Builder clear() { super.clear(); tableName_ = ""; @@ -446,79 +460,87 @@ public Builder clear() { return this; } - @Override + @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return BigtableProto - .internal_static_google_bigtable_v2_ListChangeStreamPartitionsRequest_descriptor; + return com.google.bigtable.v2.BigtableProto + .internal_static_google_bigtable_v2_GenerateInitialChangeStreamPartitionsRequest_descriptor; } - @Override - public ListChangeStreamPartitionsRequest getDefaultInstanceForType() { - return ListChangeStreamPartitionsRequest.getDefaultInstance(); + @java.lang.Override + public com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest + getDefaultInstanceForType() { + return com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest + .getDefaultInstance(); } - @Override - public ListChangeStreamPartitionsRequest build() { - ListChangeStreamPartitionsRequest result = buildPartial(); + @java.lang.Override + public com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest build() { + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - @Override - public ListChangeStreamPartitionsRequest buildPartial() { - ListChangeStreamPartitionsRequest result = new ListChangeStreamPartitionsRequest(this); + @java.lang.Override + public com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest buildPartial() { + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest result = + new 
com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest(this); result.tableName_ = tableName_; result.appProfileId_ = appProfileId_; onBuilt(); return result; } - @Override + @java.lang.Override public Builder clone() { return super.clone(); } - @Override - public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } - @Override + @java.lang.Override public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } - @Override + @java.lang.Override public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } - @Override + @java.lang.Override public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } - @Override + @java.lang.Override public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } - @Override + @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof ListChangeStreamPartitionsRequest) { - return mergeFrom((ListChangeStreamPartitionsRequest) other); + if (other instanceof com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest) { + return mergeFrom( + (com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest) other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(ListChangeStreamPartitionsRequest other) { - if (other == 
ListChangeStreamPartitionsRequest.getDefaultInstance()) return this; + public Builder mergeFrom( + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest other) { + if (other + == com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest + .getDefaultInstance()) return this; if (!other.getTableName().isEmpty()) { tableName_ = other.tableName_; onChanged(); @@ -532,21 +554,23 @@ public Builder mergeFrom(ListChangeStreamPartitionsRequest other) { return this; } - @Override + @java.lang.Override public final boolean isInitialized() { return true; } - @Override + @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - ListChangeStreamPartitionsRequest parsedMessage = null; + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (ListChangeStreamPartitionsRequest) e.getUnfinishedMessage(); + parsedMessage = + (com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest) + e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -556,13 +580,13 @@ public Builder mergeFrom( return this; } - private Object tableName_ = ""; + private java.lang.Object tableName_ = ""; /** * * *
    -     * Required. The unique name of the table from which to get change stream
    -     * partitions. Values are of the form
    +     * Required. The unique name of the table from which to get change stream partitions.
    +     * Values are of the form
          * `projects/<project>/instances/<instance>/tables/<table>`.
          * Change streaming must be enabled on the table.
          * 
    @@ -573,23 +597,23 @@ public Builder mergeFrom( * * @return The tableName. */ - public String getTableName() { - Object ref = tableName_; - if (!(ref instanceof String)) { + public java.lang.String getTableName() { + java.lang.Object ref = tableName_; + if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); + java.lang.String s = bs.toStringUtf8(); tableName_ = s; return s; } else { - return (String) ref; + return (java.lang.String) ref; } } /** * * *
    -     * Required. The unique name of the table from which to get change stream
    -     * partitions. Values are of the form
    +     * Required. The unique name of the table from which to get change stream partitions.
    +     * Values are of the form
          * `projects/<project>/instances/<instance>/tables/<table>`.
          * Change streaming must be enabled on the table.
          * 
    @@ -601,10 +625,10 @@ public String getTableName() { * @return The bytes for tableName. */ public com.google.protobuf.ByteString getTableNameBytes() { - Object ref = tableName_; + java.lang.Object ref = tableName_; if (ref instanceof String) { com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); tableName_ = b; return b; } else { @@ -615,8 +639,8 @@ public com.google.protobuf.ByteString getTableNameBytes() { * * *
    -     * Required. The unique name of the table from which to get change stream
    -     * partitions. Values are of the form
    +     * Required. The unique name of the table from which to get change stream partitions.
    +     * Values are of the form
          * `projects/<project>/instances/<instance>/tables/<table>`.
          * Change streaming must be enabled on the table.
          * 
    @@ -628,7 +652,7 @@ public com.google.protobuf.ByteString getTableNameBytes() { * @param value The tableName to set. * @return This builder for chaining. */ - public Builder setTableName(String value) { + public Builder setTableName(java.lang.String value) { if (value == null) { throw new NullPointerException(); } @@ -641,8 +665,8 @@ public Builder setTableName(String value) { * * *
    -     * Required. The unique name of the table from which to get change stream
    -     * partitions. Values are of the form
    +     * Required. The unique name of the table from which to get change stream partitions.
    +     * Values are of the form
          * `projects/<project>/instances/<instance>/tables/<table>`.
          * Change streaming must be enabled on the table.
          * 
    @@ -663,8 +687,8 @@ public Builder clearTableName() { * * *
    -     * Required. The unique name of the table from which to get change stream
    -     * partitions. Values are of the form
    +     * Required. The unique name of the table from which to get change stream partitions.
    +     * Values are of the form
          * `projects/<project>/instances/<instance>/tables/<table>`.
          * Change streaming must be enabled on the table.
          * 
    @@ -687,7 +711,7 @@ public Builder setTableNameBytes(com.google.protobuf.ByteString value) { return this; } - private Object appProfileId_ = ""; + private java.lang.Object appProfileId_ = ""; /** * * @@ -701,15 +725,15 @@ public Builder setTableNameBytes(com.google.protobuf.ByteString value) { * * @return The appProfileId. */ - public String getAppProfileId() { - Object ref = appProfileId_; - if (!(ref instanceof String)) { + public java.lang.String getAppProfileId() { + java.lang.Object ref = appProfileId_; + if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); + java.lang.String s = bs.toStringUtf8(); appProfileId_ = s; return s; } else { - return (String) ref; + return (java.lang.String) ref; } } /** @@ -726,10 +750,10 @@ public String getAppProfileId() { * @return The bytes for appProfileId. */ public com.google.protobuf.ByteString getAppProfileIdBytes() { - Object ref = appProfileId_; + java.lang.Object ref = appProfileId_; if (ref instanceof String) { com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); appProfileId_ = b; return b; } else { @@ -750,7 +774,7 @@ public com.google.protobuf.ByteString getAppProfileIdBytes() { * @param value The appProfileId to set. * @return This builder for chaining. 
*/ - public Builder setAppProfileId(String value) { + public Builder setAppProfileId(java.lang.String value) { if (value == null) { throw new NullPointerException(); } @@ -803,53 +827,58 @@ public Builder setAppProfileIdBytes(com.google.protobuf.ByteString value) { return this; } - @Override + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } - @Override + @java.lang.Override public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } - // @@protoc_insertion_point(builder_scope:google.bigtable.v2.ListChangeStreamPartitionsRequest) + // @@protoc_insertion_point(builder_scope:google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest) } - // @@protoc_insertion_point(class_scope:google.bigtable.v2.ListChangeStreamPartitionsRequest) - private static final ListChangeStreamPartitionsRequest DEFAULT_INSTANCE; + // @@protoc_insertion_point(class_scope:google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest) + private static final com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest + DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new ListChangeStreamPartitionsRequest(); + DEFAULT_INSTANCE = new com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest(); } - public static ListChangeStreamPartitionsRequest getDefaultInstance() { + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest + getDefaultInstance() { return DEFAULT_INSTANCE; } - private static final com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - @Override - public ListChangeStreamPartitionsRequest parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new 
ListChangeStreamPartitionsRequest(input, extensionRegistry); - } - }; + private static final com.google.protobuf.Parser + PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GenerateInitialChangeStreamPartitionsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GenerateInitialChangeStreamPartitionsRequest(input, extensionRegistry); + } + }; - public static com.google.protobuf.Parser parser() { + public static com.google.protobuf.Parser parser() { return PARSER; } - @Override - public com.google.protobuf.Parser getParserForType() { + @java.lang.Override + public com.google.protobuf.Parser + getParserForType() { return PARSER; } - @Override - public ListChangeStreamPartitionsRequest getDefaultInstanceForType() { + @java.lang.Override + public com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest + getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsRequestOrBuilder.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/GenerateInitialChangeStreamPartitionsRequestOrBuilder.java similarity index 89% rename from proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsRequestOrBuilder.java rename to proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/GenerateInitialChangeStreamPartitionsRequestOrBuilder.java index 741730e983..9fd0307e59 100644 --- a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsRequestOrBuilder.java +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/GenerateInitialChangeStreamPartitionsRequestOrBuilder.java @@ -18,17 +18,17 @@ package com.google.bigtable.v2; -public interface 
ListChangeStreamPartitionsRequestOrBuilder +public interface GenerateInitialChangeStreamPartitionsRequestOrBuilder extends - // @@protoc_insertion_point(interface_extends:google.bigtable.v2.ListChangeStreamPartitionsRequest) + // @@protoc_insertion_point(interface_extends:google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest) com.google.protobuf.MessageOrBuilder { /** * * *
    -   * Required. The unique name of the table from which to get change stream
    -   * partitions. Values are of the form
    +   * Required. The unique name of the table from which to get change stream partitions.
    +   * Values are of the form
        * `projects/<project>/instances/<instance>/tables/<table>`.
        * Change streaming must be enabled on the table.
        * 
    @@ -39,13 +39,13 @@ public interface ListChangeStreamPartitionsRequestOrBuilder * * @return The tableName. */ - String getTableName(); + java.lang.String getTableName(); /** * * *
    -   * Required. The unique name of the table from which to get change stream
    -   * partitions. Values are of the form
    +   * Required. The unique name of the table from which to get change stream partitions.
    +   * Values are of the form
        * `projects/<project>/instances/<instance>/tables/<table>`.
        * Change streaming must be enabled on the table.
        * 
    @@ -71,7 +71,7 @@ public interface ListChangeStreamPartitionsRequestOrBuilder * * @return The appProfileId. */ - String getAppProfileId(); + java.lang.String getAppProfileId(); /** * * diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsResponse.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/GenerateInitialChangeStreamPartitionsResponse.java similarity index 64% rename from proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsResponse.java rename to proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/GenerateInitialChangeStreamPartitionsResponse.java index fd684952e7..6bfad03aa4 100644 --- a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsResponse.java +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/GenerateInitialChangeStreamPartitionsResponse.java @@ -23,42 +23,43 @@ * *
      * NOTE: This API is not generally available. Users must be allowlisted.
    - * Response message for Bigtable.ListChangeStreamPartitions.
    + * Response message for Bigtable.GenerateInitialChangeStreamPartitions.
      * 
    * - * Protobuf type {@code google.bigtable.v2.ListChangeStreamPartitionsResponse} + * Protobuf type {@code google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse} */ -public final class ListChangeStreamPartitionsResponse extends com.google.protobuf.GeneratedMessageV3 +public final class GenerateInitialChangeStreamPartitionsResponse + extends com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:google.bigtable.v2.ListChangeStreamPartitionsResponse) - ListChangeStreamPartitionsResponseOrBuilder { + // @@protoc_insertion_point(message_implements:google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse) + GenerateInitialChangeStreamPartitionsResponseOrBuilder { private static final long serialVersionUID = 0L; - // Use ListChangeStreamPartitionsResponse.newBuilder() to construct. - private ListChangeStreamPartitionsResponse( + // Use GenerateInitialChangeStreamPartitionsResponse.newBuilder() to construct. + private GenerateInitialChangeStreamPartitionsResponse( com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } - private ListChangeStreamPartitionsResponse() {} + private GenerateInitialChangeStreamPartitionsResponse() {} - @Override + @java.lang.Override @SuppressWarnings({"unused"}) - protected Object newInstance(UnusedPrivateParameter unused) { - return new ListChangeStreamPartitionsResponse(); + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new GenerateInitialChangeStreamPartitionsResponse(); } - @Override + @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } - private ListChangeStreamPartitionsResponse( + private GenerateInitialChangeStreamPartitionsResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { - throw new 
NullPointerException(); + throw new java.lang.NullPointerException(); } com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -108,15 +109,18 @@ private ListChangeStreamPartitionsResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return BigtableProto - .internal_static_google_bigtable_v2_ListChangeStreamPartitionsResponse_descriptor; + return com.google.bigtable.v2.BigtableProto + .internal_static_google_bigtable_v2_GenerateInitialChangeStreamPartitionsResponse_descriptor; } - @Override - protected FieldAccessorTable internalGetFieldAccessorTable() { - return BigtableProto - .internal_static_google_bigtable_v2_ListChangeStreamPartitionsResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized(ListChangeStreamPartitionsResponse.class, Builder.class); + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.v2.BigtableProto + .internal_static_google_bigtable_v2_GenerateInitialChangeStreamPartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse.class, + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse.Builder.class); } public static final int PARTITION_FIELD_NUMBER = 1; @@ -132,7 +136,7 @@ protected FieldAccessorTable internalGetFieldAccessorTable() { * * @return Whether the partition field is set. */ - @Override + @java.lang.Override public boolean hasPartition() { return partition_ != null; } @@ -147,7 +151,7 @@ public boolean hasPartition() { * * @return The partition. */ - @Override + @java.lang.Override public com.google.bigtable.v2.StreamPartition getPartition() { return partition_ == null ? 
com.google.bigtable.v2.StreamPartition.getDefaultInstance() @@ -162,14 +166,14 @@ public com.google.bigtable.v2.StreamPartition getPartition() { * * .google.bigtable.v2.StreamPartition partition = 1; */ - @Override + @java.lang.Override public com.google.bigtable.v2.StreamPartitionOrBuilder getPartitionOrBuilder() { return getPartition(); } private byte memoizedIsInitialized = -1; - @Override + @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; @@ -179,7 +183,7 @@ public final boolean isInitialized() { return true; } - @Override + @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (partition_ != null) { output.writeMessage(1, getPartition()); @@ -187,7 +191,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io unknownFields.writeTo(output); } - @Override + @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; @@ -201,15 +205,16 @@ public int getSerializedSize() { return size; } - @Override - public boolean equals(final Object obj) { + @java.lang.Override + public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof ListChangeStreamPartitionsResponse)) { + if (!(obj instanceof com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse)) { return super.equals(obj); } - ListChangeStreamPartitionsResponse other = (ListChangeStreamPartitionsResponse) obj; + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse other = + (com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse) obj; if (hasPartition() != other.hasPartition()) return false; if (hasPartition()) { @@ -219,7 +224,7 @@ public boolean equals(final Object obj) { return true; } - @Override + @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; @@ 
-235,70 +240,72 @@ public int hashCode() { return hash; } - public static ListChangeStreamPartitionsResponse parseFrom(java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static ListChangeStreamPartitionsResponse parseFrom( + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static ListChangeStreamPartitionsResponse parseFrom(com.google.protobuf.ByteString data) + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse parseFrom( + com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static ListChangeStreamPartitionsResponse parseFrom( + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static ListChangeStreamPartitionsResponse parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static ListChangeStreamPartitionsResponse parseFrom( + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse parseFrom( byte[] data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static ListChangeStreamPartitionsResponse parseFrom(java.io.InputStream input) - throws java.io.IOException { + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } - public static ListChangeStreamPartitionsResponse parseFrom( + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } - public static ListChangeStreamPartitionsResponse parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); } - public static ListChangeStreamPartitionsResponse parseDelimitedFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( PARSER, input, extensionRegistry); } - public static ListChangeStreamPartitionsResponse parseFrom( + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse parseFrom( 
com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } - public static ListChangeStreamPartitionsResponse parseFrom( + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -306,7 +313,7 @@ public static ListChangeStreamPartitionsResponse parseFrom( PARSER, input, extensionRegistry); } - @Override + @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } @@ -315,17 +322,18 @@ public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - public static Builder newBuilder(ListChangeStreamPartitionsResponse prototype) { + public static Builder newBuilder( + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } - @Override + @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } - @Override - protected Builder newBuilderForType(BuilderParent parent) { + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -334,33 +342,37 @@ protected Builder newBuilderForType(BuilderParent parent) { * *
        * NOTE: This API is not generally available. Users must be allowlisted.
    -   * Response message for Bigtable.ListChangeStreamPartitions.
    +   * Response message for Bigtable.GenerateInitialChangeStreamPartitions.
        * 
    * - * Protobuf type {@code google.bigtable.v2.ListChangeStreamPartitionsResponse} + * Protobuf type {@code google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:google.bigtable.v2.ListChangeStreamPartitionsResponse) - ListChangeStreamPartitionsResponseOrBuilder { + // @@protoc_insertion_point(builder_implements:google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse) + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return BigtableProto - .internal_static_google_bigtable_v2_ListChangeStreamPartitionsResponse_descriptor; + return com.google.bigtable.v2.BigtableProto + .internal_static_google_bigtable_v2_GenerateInitialChangeStreamPartitionsResponse_descriptor; } - @Override - protected FieldAccessorTable internalGetFieldAccessorTable() { - return BigtableProto - .internal_static_google_bigtable_v2_ListChangeStreamPartitionsResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized(ListChangeStreamPartitionsResponse.class, Builder.class); + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.v2.BigtableProto + .internal_static_google_bigtable_v2_GenerateInitialChangeStreamPartitionsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse.class, + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse.Builder.class); } - // Construct using com.google.bigtable.v2.ListChangeStreamPartitionsResponse.newBuilder() + // Construct using + // com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - private 
Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -369,7 +381,7 @@ private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} } - @Override + @java.lang.Override public Builder clear() { super.clear(); if (partitionBuilder_ == null) { @@ -381,29 +393,32 @@ public Builder clear() { return this; } - @Override + @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return BigtableProto - .internal_static_google_bigtable_v2_ListChangeStreamPartitionsResponse_descriptor; + return com.google.bigtable.v2.BigtableProto + .internal_static_google_bigtable_v2_GenerateInitialChangeStreamPartitionsResponse_descriptor; } - @Override - public ListChangeStreamPartitionsResponse getDefaultInstanceForType() { - return ListChangeStreamPartitionsResponse.getDefaultInstance(); + @java.lang.Override + public com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse + getDefaultInstanceForType() { + return com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse + .getDefaultInstance(); } - @Override - public ListChangeStreamPartitionsResponse build() { - ListChangeStreamPartitionsResponse result = buildPartial(); + @java.lang.Override + public com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse build() { + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - @Override - public ListChangeStreamPartitionsResponse buildPartial() { - ListChangeStreamPartitionsResponse result = new ListChangeStreamPartitionsResponse(this); + @java.lang.Override + public com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse buildPartial() { + 
com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse result = + new com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse(this); if (partitionBuilder_ == null) { result.partition_ = partition_; } else { @@ -413,50 +428,55 @@ public ListChangeStreamPartitionsResponse buildPartial() { return result; } - @Override + @java.lang.Override public Builder clone() { return super.clone(); } - @Override - public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } - @Override + @java.lang.Override public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } - @Override + @java.lang.Override public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } - @Override + @java.lang.Override public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } - @Override + @java.lang.Override public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } - @Override + @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof ListChangeStreamPartitionsResponse) { - return mergeFrom((ListChangeStreamPartitionsResponse) other); + if (other instanceof com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse) { + return mergeFrom( + (com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse) other); } else { 
super.mergeFrom(other); return this; } } - public Builder mergeFrom(ListChangeStreamPartitionsResponse other) { - if (other == ListChangeStreamPartitionsResponse.getDefaultInstance()) return this; + public Builder mergeFrom( + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse other) { + if (other + == com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse + .getDefaultInstance()) return this; if (other.hasPartition()) { mergePartition(other.getPartition()); } @@ -465,21 +485,23 @@ public Builder mergeFrom(ListChangeStreamPartitionsResponse other) { return this; } - @Override + @java.lang.Override public final boolean isInitialized() { return true; } - @Override + @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - ListChangeStreamPartitionsResponse parsedMessage = null; + com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (ListChangeStreamPartitionsResponse) e.getUnfinishedMessage(); + parsedMessage = + (com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse) + e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -674,53 +696,58 @@ public com.google.bigtable.v2.StreamPartitionOrBuilder getPartitionOrBuilder() { return partitionBuilder_; } - @Override + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } - @Override + @java.lang.Override public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } - // 
@@protoc_insertion_point(builder_scope:google.bigtable.v2.ListChangeStreamPartitionsResponse) + // @@protoc_insertion_point(builder_scope:google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse) } - // @@protoc_insertion_point(class_scope:google.bigtable.v2.ListChangeStreamPartitionsResponse) - private static final ListChangeStreamPartitionsResponse DEFAULT_INSTANCE; + // @@protoc_insertion_point(class_scope:google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse) + private static final com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse + DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new ListChangeStreamPartitionsResponse(); + DEFAULT_INSTANCE = new com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse(); } - public static ListChangeStreamPartitionsResponse getDefaultInstance() { + public static com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse + getDefaultInstance() { return DEFAULT_INSTANCE; } - private static final com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - @Override - public ListChangeStreamPartitionsResponse parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new ListChangeStreamPartitionsResponse(input, extensionRegistry); - } - }; + private static final com.google.protobuf.Parser + PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GenerateInitialChangeStreamPartitionsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GenerateInitialChangeStreamPartitionsResponse(input, extensionRegistry); + } + }; - public static com.google.protobuf.Parser parser() { + public static com.google.protobuf.Parser parser() { return 
PARSER; } - @Override - public com.google.protobuf.Parser getParserForType() { + @java.lang.Override + public com.google.protobuf.Parser + getParserForType() { return PARSER; } - @Override - public ListChangeStreamPartitionsResponse getDefaultInstanceForType() { + @java.lang.Override + public com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse + getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsResponseOrBuilder.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/GenerateInitialChangeStreamPartitionsResponseOrBuilder.java similarity index 92% rename from proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsResponseOrBuilder.java rename to proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/GenerateInitialChangeStreamPartitionsResponseOrBuilder.java index 630816f767..ba1d2b2346 100644 --- a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ListChangeStreamPartitionsResponseOrBuilder.java +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/GenerateInitialChangeStreamPartitionsResponseOrBuilder.java @@ -18,9 +18,9 @@ package com.google.bigtable.v2; -public interface ListChangeStreamPartitionsResponseOrBuilder +public interface GenerateInitialChangeStreamPartitionsResponseOrBuilder extends - // @@protoc_insertion_point(interface_extends:google.bigtable.v2.ListChangeStreamPartitionsResponse) + // @@protoc_insertion_point(interface_extends:google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse) com.google.protobuf.MessageOrBuilder { /** diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamRequest.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamRequest.java index 14d0d9024e..334a287971 100644 --- 
a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamRequest.java +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamRequest.java @@ -43,13 +43,13 @@ private ReadChangeStreamRequest() { appProfileId_ = ""; } - @Override + @java.lang.Override @SuppressWarnings({"unused"}) - protected Object newInstance(UnusedPrivateParameter unused) { + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { return new ReadChangeStreamRequest(); } - @Override + @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } @@ -60,7 +60,7 @@ private ReadChangeStreamRequest( throws com.google.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { - throw new NullPointerException(); + throw new java.lang.NullPointerException(); } com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -74,25 +74,27 @@ private ReadChangeStreamRequest( break; case 10: { - String s = input.readStringRequireUtf8(); + java.lang.String s = input.readStringRequireUtf8(); tableName_ = s; break; } case 18: { - String s = input.readStringRequireUtf8(); + java.lang.String s = input.readStringRequireUtf8(); appProfileId_ = s; break; } case 26: { - StreamPartition.Builder subBuilder = null; + com.google.bigtable.v2.StreamPartition.Builder subBuilder = null; if (partition_ != null) { subBuilder = partition_.toBuilder(); } - partition_ = input.readMessage(StreamPartition.parser(), extensionRegistry); + partition_ = + input.readMessage( + com.google.bigtable.v2.StreamPartition.parser(), extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(partition_); partition_ = subBuilder.buildPartial(); @@ -132,13 +134,16 @@ private ReadChangeStreamRequest( } case 50: { - StreamContinuationTokens.Builder subBuilder = null; + com.google.bigtable.v2.StreamContinuationTokens.Builder subBuilder = 
null; if (startFromCase_ == 6) { - subBuilder = ((StreamContinuationTokens) startFrom_).toBuilder(); + subBuilder = + ((com.google.bigtable.v2.StreamContinuationTokens) startFrom_).toBuilder(); } - startFrom_ = input.readMessage(StreamContinuationTokens.parser(), extensionRegistry); + startFrom_ = + input.readMessage( + com.google.bigtable.v2.StreamContinuationTokens.parser(), extensionRegistry); if (subBuilder != null) { - subBuilder.mergeFrom((StreamContinuationTokens) startFrom_); + subBuilder.mergeFrom((com.google.bigtable.v2.StreamContinuationTokens) startFrom_); startFrom_ = subBuilder.buildPartial(); } startFromCase_ = 6; @@ -181,20 +186,27 @@ private ReadChangeStreamRequest( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return BigtableProto.internal_static_google_bigtable_v2_ReadChangeStreamRequest_descriptor; + return com.google.bigtable.v2.BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamRequest_descriptor; } - @Override - protected FieldAccessorTable internalGetFieldAccessorTable() { - return BigtableProto + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized(ReadChangeStreamRequest.class, Builder.class); + .ensureFieldAccessorsInitialized( + com.google.bigtable.v2.ReadChangeStreamRequest.class, + com.google.bigtable.v2.ReadChangeStreamRequest.Builder.class); } private int startFromCase_ = 0; - private Object startFrom_; + private java.lang.Object startFrom_; - public enum StartFromCase implements com.google.protobuf.Internal.EnumLite, InternalOneOfEnum { + public enum StartFromCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { START_TIME(4), CONTINUATION_TOKENS(6), STARTFROM_NOT_SET(0); @@ -208,7 
+220,7 @@ private StartFromCase(int value) { * @return The enum associated with the given number. * @deprecated Use {@link #forNumber(int)} instead. */ - @Deprecated + @java.lang.Deprecated public static StartFromCase valueOf(int value) { return forNumber(value); } @@ -236,7 +248,7 @@ public StartFromCase getStartFromCase() { } public static final int TABLE_NAME_FIELD_NUMBER = 1; - private volatile Object tableName_; + private volatile java.lang.Object tableName_; /** * * @@ -253,14 +265,14 @@ public StartFromCase getStartFromCase() { * * @return The tableName. */ - @Override - public String getTableName() { - Object ref = tableName_; - if (ref instanceof String) { - return (String) ref; + @java.lang.Override + public java.lang.String getTableName() { + java.lang.Object ref = tableName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); + java.lang.String s = bs.toStringUtf8(); tableName_ = s; return s; } @@ -281,11 +293,12 @@ public String getTableName() { * * @return The bytes for tableName. */ - @Override + @java.lang.Override public com.google.protobuf.ByteString getTableNameBytes() { - Object ref = tableName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); + java.lang.Object ref = tableName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); tableName_ = b; return b; } else { @@ -294,7 +307,7 @@ public com.google.protobuf.ByteString getTableNameBytes() { } public static final int APP_PROFILE_ID_FIELD_NUMBER = 2; - private volatile Object appProfileId_; + private volatile java.lang.Object appProfileId_; /** * * @@ -308,14 +321,14 @@ public com.google.protobuf.ByteString getTableNameBytes() { * * @return The appProfileId. 
*/ - @Override - public String getAppProfileId() { - Object ref = appProfileId_; - if (ref instanceof String) { - return (String) ref; + @java.lang.Override + public java.lang.String getAppProfileId() { + java.lang.Object ref = appProfileId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); + java.lang.String s = bs.toStringUtf8(); appProfileId_ = s; return s; } @@ -333,11 +346,12 @@ public String getAppProfileId() { * * @return The bytes for appProfileId. */ - @Override + @java.lang.Override public com.google.protobuf.ByteString getAppProfileIdBytes() { - Object ref = appProfileId_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); + java.lang.Object ref = appProfileId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); appProfileId_ = b; return b; } else { @@ -346,7 +360,7 @@ public com.google.protobuf.ByteString getAppProfileIdBytes() { } public static final int PARTITION_FIELD_NUMBER = 3; - private StreamPartition partition_; + private com.google.bigtable.v2.StreamPartition partition_; /** * * @@ -358,7 +372,7 @@ public com.google.protobuf.ByteString getAppProfileIdBytes() { * * @return Whether the partition field is set. */ - @Override + @java.lang.Override public boolean hasPartition() { return partition_ != null; } @@ -373,9 +387,11 @@ public boolean hasPartition() { * * @return The partition. */ - @Override - public StreamPartition getPartition() { - return partition_ == null ? StreamPartition.getDefaultInstance() : partition_; + @java.lang.Override + public com.google.bigtable.v2.StreamPartition getPartition() { + return partition_ == null + ? 
com.google.bigtable.v2.StreamPartition.getDefaultInstance() + : partition_; } /** * @@ -386,8 +402,8 @@ public StreamPartition getPartition() { * * .google.bigtable.v2.StreamPartition partition = 3; */ - @Override - public StreamPartitionOrBuilder getPartitionOrBuilder() { + @java.lang.Override + public com.google.bigtable.v2.StreamPartitionOrBuilder getPartitionOrBuilder() { return getPartition(); } @@ -406,7 +422,7 @@ public StreamPartitionOrBuilder getPartitionOrBuilder() { * * @return Whether the startTime field is set. */ - @Override + @java.lang.Override public boolean hasStartTime() { return startFromCase_ == 4; } @@ -424,7 +440,7 @@ public boolean hasStartTime() { * * @return The startTime. */ - @Override + @java.lang.Override public com.google.protobuf.Timestamp getStartTime() { if (startFromCase_ == 4) { return (com.google.protobuf.Timestamp) startFrom_; @@ -443,7 +459,7 @@ public com.google.protobuf.Timestamp getStartTime() { * * .google.protobuf.Timestamp start_time = 4; */ - @Override + @java.lang.Override public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { if (startFromCase_ == 4) { return (com.google.protobuf.Timestamp) startFrom_; @@ -471,7 +487,7 @@ public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { * * @return Whether the continuationTokens field is set. */ - @Override + @java.lang.Override public boolean hasContinuationTokens() { return startFromCase_ == 6; } @@ -494,12 +510,12 @@ public boolean hasContinuationTokens() { * * @return The continuationTokens. 
*/ - @Override - public StreamContinuationTokens getContinuationTokens() { + @java.lang.Override + public com.google.bigtable.v2.StreamContinuationTokens getContinuationTokens() { if (startFromCase_ == 6) { - return (StreamContinuationTokens) startFrom_; + return (com.google.bigtable.v2.StreamContinuationTokens) startFrom_; } - return StreamContinuationTokens.getDefaultInstance(); + return com.google.bigtable.v2.StreamContinuationTokens.getDefaultInstance(); } /** * @@ -518,12 +534,12 @@ public StreamContinuationTokens getContinuationTokens() { * * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; */ - @Override - public StreamContinuationTokensOrBuilder getContinuationTokensOrBuilder() { + @java.lang.Override + public com.google.bigtable.v2.StreamContinuationTokensOrBuilder getContinuationTokensOrBuilder() { if (startFromCase_ == 6) { - return (StreamContinuationTokens) startFrom_; + return (com.google.bigtable.v2.StreamContinuationTokens) startFrom_; } - return StreamContinuationTokens.getDefaultInstance(); + return com.google.bigtable.v2.StreamContinuationTokens.getDefaultInstance(); } public static final int END_TIME_FIELD_NUMBER = 5; @@ -541,7 +557,7 @@ public StreamContinuationTokensOrBuilder getContinuationTokensOrBuilder() { * * @return Whether the endTime field is set. */ - @Override + @java.lang.Override public boolean hasEndTime() { return endTime_ != null; } @@ -558,7 +574,7 @@ public boolean hasEndTime() { * * @return The endTime. */ - @Override + @java.lang.Override public com.google.protobuf.Timestamp getEndTime() { return endTime_ == null ? 
com.google.protobuf.Timestamp.getDefaultInstance() : endTime_; } @@ -573,7 +589,7 @@ public com.google.protobuf.Timestamp getEndTime() { * * .google.protobuf.Timestamp end_time = 5; */ - @Override + @java.lang.Override public com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder() { return getEndTime(); } @@ -592,7 +608,7 @@ public com.google.protobuf.TimestampOrBuilder getEndTimeOrBuilder() { * * @return Whether the heartbeatDuration field is set. */ - @Override + @java.lang.Override public boolean hasHeartbeatDuration() { return heartbeatDuration_ != null; } @@ -608,7 +624,7 @@ public boolean hasHeartbeatDuration() { * * @return The heartbeatDuration. */ - @Override + @java.lang.Override public com.google.protobuf.Duration getHeartbeatDuration() { return heartbeatDuration_ == null ? com.google.protobuf.Duration.getDefaultInstance() @@ -624,14 +640,14 @@ public com.google.protobuf.Duration getHeartbeatDuration() { * * .google.protobuf.Duration heartbeat_duration = 7; */ - @Override + @java.lang.Override public com.google.protobuf.DurationOrBuilder getHeartbeatDurationOrBuilder() { return getHeartbeatDuration(); } private byte memoizedIsInitialized = -1; - @Override + @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; @@ -641,7 +657,7 @@ public final boolean isInitialized() { return true; } - @Override + @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(tableName_)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 1, tableName_); @@ -659,7 +675,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io output.writeMessage(5, getEndTime()); } if (startFromCase_ == 6) { - output.writeMessage(6, (StreamContinuationTokens) startFrom_); + output.writeMessage(6, (com.google.bigtable.v2.StreamContinuationTokens) 
startFrom_); } if (heartbeatDuration_ != null) { output.writeMessage(7, getHeartbeatDuration()); @@ -667,7 +683,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io unknownFields.writeTo(output); } - @Override + @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; @@ -693,7 +709,7 @@ public int getSerializedSize() { if (startFromCase_ == 6) { size += com.google.protobuf.CodedOutputStream.computeMessageSize( - 6, (StreamContinuationTokens) startFrom_); + 6, (com.google.bigtable.v2.StreamContinuationTokens) startFrom_); } if (heartbeatDuration_ != null) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getHeartbeatDuration()); @@ -703,15 +719,16 @@ public int getSerializedSize() { return size; } - @Override - public boolean equals(final Object obj) { + @java.lang.Override + public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof ReadChangeStreamRequest)) { + if (!(obj instanceof com.google.bigtable.v2.ReadChangeStreamRequest)) { return super.equals(obj); } - ReadChangeStreamRequest other = (ReadChangeStreamRequest) obj; + com.google.bigtable.v2.ReadChangeStreamRequest other = + (com.google.bigtable.v2.ReadChangeStreamRequest) obj; if (!getTableName().equals(other.getTableName())) return false; if (!getAppProfileId().equals(other.getAppProfileId())) return false; @@ -742,7 +759,7 @@ public boolean equals(final Object obj) { return true; } - @Override + @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; @@ -782,70 +799,71 @@ public int hashCode() { return hash; } - public static ReadChangeStreamRequest parseFrom(java.nio.ByteBuffer data) + public static com.google.bigtable.v2.ReadChangeStreamRequest parseFrom(java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static ReadChangeStreamRequest 
parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamRequest parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static ReadChangeStreamRequest parseFrom(com.google.protobuf.ByteString data) + public static com.google.bigtable.v2.ReadChangeStreamRequest parseFrom( + com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static ReadChangeStreamRequest parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static ReadChangeStreamRequest parseFrom(byte[] data) + public static com.google.bigtable.v2.ReadChangeStreamRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static ReadChangeStreamRequest parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static ReadChangeStreamRequest parseFrom(java.io.InputStream input) + public static com.google.bigtable.v2.ReadChangeStreamRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } - public static ReadChangeStreamRequest parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { 
return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } - public static ReadChangeStreamRequest parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { + public static com.google.bigtable.v2.ReadChangeStreamRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); } - public static ReadChangeStreamRequest parseDelimitedFrom( + public static com.google.bigtable.v2.ReadChangeStreamRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( PARSER, input, extensionRegistry); } - public static ReadChangeStreamRequest parseFrom(com.google.protobuf.CodedInputStream input) - throws java.io.IOException { + public static com.google.bigtable.v2.ReadChangeStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } - public static ReadChangeStreamRequest parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -853,7 +871,7 @@ public static ReadChangeStreamRequest parseFrom( PARSER, input, extensionRegistry); } - @Override + @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } @@ -862,17 +880,17 @@ public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - public static Builder newBuilder(ReadChangeStreamRequest prototype) { + public static Builder newBuilder(com.google.bigtable.v2.ReadChangeStreamRequest prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } - @Override + 
@java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } - @Override - protected Builder newBuilderForType(BuilderParent parent) { + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -889,16 +907,20 @@ protected Builder newBuilderForType(BuilderParent parent) { public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:google.bigtable.v2.ReadChangeStreamRequest) - ReadChangeStreamRequestOrBuilder { + com.google.bigtable.v2.ReadChangeStreamRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return BigtableProto.internal_static_google_bigtable_v2_ReadChangeStreamRequest_descriptor; + return com.google.bigtable.v2.BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamRequest_descriptor; } - @Override - protected FieldAccessorTable internalGetFieldAccessorTable() { - return BigtableProto + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized(ReadChangeStreamRequest.class, Builder.class); + .ensureFieldAccessorsInitialized( + com.google.bigtable.v2.ReadChangeStreamRequest.class, + com.google.bigtable.v2.ReadChangeStreamRequest.Builder.class); } // Construct using com.google.bigtable.v2.ReadChangeStreamRequest.newBuilder() @@ -906,7 +928,7 @@ private Builder() { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -915,7 +937,7 @@ 
private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} } - @Override + @java.lang.Override public Builder clear() { super.clear(); tableName_ = ""; @@ -945,28 +967,30 @@ public Builder clear() { return this; } - @Override + @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return BigtableProto.internal_static_google_bigtable_v2_ReadChangeStreamRequest_descriptor; + return com.google.bigtable.v2.BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamRequest_descriptor; } - @Override - public ReadChangeStreamRequest getDefaultInstanceForType() { - return ReadChangeStreamRequest.getDefaultInstance(); + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamRequest getDefaultInstanceForType() { + return com.google.bigtable.v2.ReadChangeStreamRequest.getDefaultInstance(); } - @Override - public ReadChangeStreamRequest build() { - ReadChangeStreamRequest result = buildPartial(); + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamRequest build() { + com.google.bigtable.v2.ReadChangeStreamRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - @Override - public ReadChangeStreamRequest buildPartial() { - ReadChangeStreamRequest result = new ReadChangeStreamRequest(this); + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamRequest buildPartial() { + com.google.bigtable.v2.ReadChangeStreamRequest result = + new com.google.bigtable.v2.ReadChangeStreamRequest(this); result.tableName_ = tableName_; result.appProfileId_ = appProfileId_; if (partitionBuilder_ == null) { @@ -1003,50 +1027,51 @@ public ReadChangeStreamRequest buildPartial() { return result; } - @Override + @java.lang.Override public Builder clone() { return super.clone(); } - @Override - public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, 
Object value) { + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } - @Override + @java.lang.Override public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } - @Override + @java.lang.Override public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } - @Override + @java.lang.Override public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } - @Override + @java.lang.Override public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } - @Override + @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof ReadChangeStreamRequest) { - return mergeFrom((ReadChangeStreamRequest) other); + if (other instanceof com.google.bigtable.v2.ReadChangeStreamRequest) { + return mergeFrom((com.google.bigtable.v2.ReadChangeStreamRequest) other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(ReadChangeStreamRequest other) { - if (other == ReadChangeStreamRequest.getDefaultInstance()) return this; + public Builder mergeFrom(com.google.bigtable.v2.ReadChangeStreamRequest other) { + if (other == com.google.bigtable.v2.ReadChangeStreamRequest.getDefaultInstance()) return this; if (!other.getTableName().isEmpty()) { tableName_ = other.tableName_; onChanged(); @@ -1085,21 +1110,21 @@ public Builder mergeFrom(ReadChangeStreamRequest other) { return this; } - @Override + @java.lang.Override public final 
boolean isInitialized() { return true; } - @Override + @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - ReadChangeStreamRequest parsedMessage = null; + com.google.bigtable.v2.ReadChangeStreamRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (ReadChangeStreamRequest) e.getUnfinishedMessage(); + parsedMessage = (com.google.bigtable.v2.ReadChangeStreamRequest) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -1110,7 +1135,7 @@ public Builder mergeFrom( } private int startFromCase_ = 0; - private Object startFrom_; + private java.lang.Object startFrom_; public StartFromCase getStartFromCase() { return StartFromCase.forNumber(startFromCase_); @@ -1123,7 +1148,7 @@ public Builder clearStartFrom() { return this; } - private Object tableName_ = ""; + private java.lang.Object tableName_ = ""; /** * * @@ -1140,15 +1165,15 @@ public Builder clearStartFrom() { * * @return The tableName. */ - public String getTableName() { - Object ref = tableName_; - if (!(ref instanceof String)) { + public java.lang.String getTableName() { + java.lang.Object ref = tableName_; + if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); + java.lang.String s = bs.toStringUtf8(); tableName_ = s; return s; } else { - return (String) ref; + return (java.lang.String) ref; } } /** @@ -1168,10 +1193,10 @@ public String getTableName() { * @return The bytes for tableName. 
*/ public com.google.protobuf.ByteString getTableNameBytes() { - Object ref = tableName_; + java.lang.Object ref = tableName_; if (ref instanceof String) { com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); tableName_ = b; return b; } else { @@ -1195,7 +1220,7 @@ public com.google.protobuf.ByteString getTableNameBytes() { * @param value The tableName to set. * @return This builder for chaining. */ - public Builder setTableName(String value) { + public Builder setTableName(java.lang.String value) { if (value == null) { throw new NullPointerException(); } @@ -1254,7 +1279,7 @@ public Builder setTableNameBytes(com.google.protobuf.ByteString value) { return this; } - private Object appProfileId_ = ""; + private java.lang.Object appProfileId_ = ""; /** * * @@ -1268,15 +1293,15 @@ public Builder setTableNameBytes(com.google.protobuf.ByteString value) { * * @return The appProfileId. */ - public String getAppProfileId() { - Object ref = appProfileId_; - if (!(ref instanceof String)) { + public java.lang.String getAppProfileId() { + java.lang.Object ref = appProfileId_; + if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); + java.lang.String s = bs.toStringUtf8(); appProfileId_ = s; return s; } else { - return (String) ref; + return (java.lang.String) ref; } } /** @@ -1293,10 +1318,10 @@ public String getAppProfileId() { * @return The bytes for appProfileId. 
*/ public com.google.protobuf.ByteString getAppProfileIdBytes() { - Object ref = appProfileId_; + java.lang.Object ref = appProfileId_; if (ref instanceof String) { com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); appProfileId_ = b; return b; } else { @@ -1317,7 +1342,7 @@ public com.google.protobuf.ByteString getAppProfileIdBytes() { * @param value The appProfileId to set. * @return This builder for chaining. */ - public Builder setAppProfileId(String value) { + public Builder setAppProfileId(java.lang.String value) { if (value == null) { throw new NullPointerException(); } @@ -1370,9 +1395,11 @@ public Builder setAppProfileIdBytes(com.google.protobuf.ByteString value) { return this; } - private StreamPartition partition_; + private com.google.bigtable.v2.StreamPartition partition_; private com.google.protobuf.SingleFieldBuilderV3< - StreamPartition, StreamPartition.Builder, StreamPartitionOrBuilder> + com.google.bigtable.v2.StreamPartition, + com.google.bigtable.v2.StreamPartition.Builder, + com.google.bigtable.v2.StreamPartitionOrBuilder> partitionBuilder_; /** * @@ -1399,9 +1426,11 @@ public boolean hasPartition() { * * @return The partition. */ - public StreamPartition getPartition() { + public com.google.bigtable.v2.StreamPartition getPartition() { if (partitionBuilder_ == null) { - return partition_ == null ? StreamPartition.getDefaultInstance() : partition_; + return partition_ == null + ? 
com.google.bigtable.v2.StreamPartition.getDefaultInstance() + : partition_; } else { return partitionBuilder_.getMessage(); } @@ -1415,7 +1444,7 @@ public StreamPartition getPartition() { * * .google.bigtable.v2.StreamPartition partition = 3; */ - public Builder setPartition(StreamPartition value) { + public Builder setPartition(com.google.bigtable.v2.StreamPartition value) { if (partitionBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -1437,7 +1466,7 @@ public Builder setPartition(StreamPartition value) { * * .google.bigtable.v2.StreamPartition partition = 3; */ - public Builder setPartition(StreamPartition.Builder builderForValue) { + public Builder setPartition(com.google.bigtable.v2.StreamPartition.Builder builderForValue) { if (partitionBuilder_ == null) { partition_ = builderForValue.build(); onChanged(); @@ -1456,10 +1485,13 @@ public Builder setPartition(StreamPartition.Builder builderForValue) { * * .google.bigtable.v2.StreamPartition partition = 3; */ - public Builder mergePartition(StreamPartition value) { + public Builder mergePartition(com.google.bigtable.v2.StreamPartition value) { if (partitionBuilder_ == null) { if (partition_ != null) { - partition_ = StreamPartition.newBuilder(partition_).mergeFrom(value).buildPartial(); + partition_ = + com.google.bigtable.v2.StreamPartition.newBuilder(partition_) + .mergeFrom(value) + .buildPartial(); } else { partition_ = value; } @@ -1499,7 +1531,7 @@ public Builder clearPartition() { * * .google.bigtable.v2.StreamPartition partition = 3; */ - public StreamPartition.Builder getPartitionBuilder() { + public com.google.bigtable.v2.StreamPartition.Builder getPartitionBuilder() { onChanged(); return getPartitionFieldBuilder().getBuilder(); @@ -1513,11 +1545,13 @@ public StreamPartition.Builder getPartitionBuilder() { * * .google.bigtable.v2.StreamPartition partition = 3; */ - public StreamPartitionOrBuilder getPartitionOrBuilder() { + public 
com.google.bigtable.v2.StreamPartitionOrBuilder getPartitionOrBuilder() { if (partitionBuilder_ != null) { return partitionBuilder_.getMessageOrBuilder(); } else { - return partition_ == null ? StreamPartition.getDefaultInstance() : partition_; + return partition_ == null + ? com.google.bigtable.v2.StreamPartition.getDefaultInstance() + : partition_; } } /** @@ -1530,12 +1564,16 @@ public StreamPartitionOrBuilder getPartitionOrBuilder() { * .google.bigtable.v2.StreamPartition partition = 3; */ private com.google.protobuf.SingleFieldBuilderV3< - StreamPartition, StreamPartition.Builder, StreamPartitionOrBuilder> + com.google.bigtable.v2.StreamPartition, + com.google.bigtable.v2.StreamPartition.Builder, + com.google.bigtable.v2.StreamPartitionOrBuilder> getPartitionFieldBuilder() { if (partitionBuilder_ == null) { partitionBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< - StreamPartition, StreamPartition.Builder, StreamPartitionOrBuilder>( + com.google.bigtable.v2.StreamPartition, + com.google.bigtable.v2.StreamPartition.Builder, + com.google.bigtable.v2.StreamPartitionOrBuilder>( getPartition(), getParentForChildren(), isClean()); partition_ = null; } @@ -1561,7 +1599,7 @@ public StreamPartitionOrBuilder getPartitionOrBuilder() { * * @return Whether the startTime field is set. */ - @Override + @java.lang.Override public boolean hasStartTime() { return startFromCase_ == 4; } @@ -1579,7 +1617,7 @@ public boolean hasStartTime() { * * @return The startTime. 
*/ - @Override + @java.lang.Override public com.google.protobuf.Timestamp getStartTime() { if (startTimeBuilder_ == null) { if (startFromCase_ == 4) { @@ -1729,7 +1767,7 @@ public com.google.protobuf.Timestamp.Builder getStartTimeBuilder() { * * .google.protobuf.Timestamp start_time = 4; */ - @Override + @java.lang.Override public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { if ((startFromCase_ == 4) && (startTimeBuilder_ != null)) { return startTimeBuilder_.getMessageOrBuilder(); @@ -1776,9 +1814,9 @@ public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { } private com.google.protobuf.SingleFieldBuilderV3< - StreamContinuationTokens, - StreamContinuationTokens.Builder, - StreamContinuationTokensOrBuilder> + com.google.bigtable.v2.StreamContinuationTokens, + com.google.bigtable.v2.StreamContinuationTokens.Builder, + com.google.bigtable.v2.StreamContinuationTokensOrBuilder> continuationTokensBuilder_; /** * @@ -1799,7 +1837,7 @@ public com.google.protobuf.TimestampOrBuilder getStartTimeOrBuilder() { * * @return Whether the continuationTokens field is set. */ - @Override + @java.lang.Override public boolean hasContinuationTokens() { return startFromCase_ == 6; } @@ -1822,18 +1860,18 @@ public boolean hasContinuationTokens() { * * @return The continuationTokens. 
*/ - @Override - public StreamContinuationTokens getContinuationTokens() { + @java.lang.Override + public com.google.bigtable.v2.StreamContinuationTokens getContinuationTokens() { if (continuationTokensBuilder_ == null) { if (startFromCase_ == 6) { - return (StreamContinuationTokens) startFrom_; + return (com.google.bigtable.v2.StreamContinuationTokens) startFrom_; } - return StreamContinuationTokens.getDefaultInstance(); + return com.google.bigtable.v2.StreamContinuationTokens.getDefaultInstance(); } else { if (startFromCase_ == 6) { return continuationTokensBuilder_.getMessage(); } - return StreamContinuationTokens.getDefaultInstance(); + return com.google.bigtable.v2.StreamContinuationTokens.getDefaultInstance(); } } /** @@ -1853,7 +1891,7 @@ public StreamContinuationTokens getContinuationTokens() { * * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; */ - public Builder setContinuationTokens(StreamContinuationTokens value) { + public Builder setContinuationTokens(com.google.bigtable.v2.StreamContinuationTokens value) { if (continuationTokensBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -1883,7 +1921,8 @@ public Builder setContinuationTokens(StreamContinuationTokens value) { * * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; */ - public Builder setContinuationTokens(StreamContinuationTokens.Builder builderForValue) { + public Builder setContinuationTokens( + com.google.bigtable.v2.StreamContinuationTokens.Builder builderForValue) { if (continuationTokensBuilder_ == null) { startFrom_ = builderForValue.build(); onChanged(); @@ -1910,11 +1949,13 @@ public Builder setContinuationTokens(StreamContinuationTokens.Builder builderFor * * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; */ - public Builder mergeContinuationTokens(StreamContinuationTokens value) { + public Builder mergeContinuationTokens(com.google.bigtable.v2.StreamContinuationTokens value) { if 
(continuationTokensBuilder_ == null) { - if (startFromCase_ == 6 && startFrom_ != StreamContinuationTokens.getDefaultInstance()) { + if (startFromCase_ == 6 + && startFrom_ != com.google.bigtable.v2.StreamContinuationTokens.getDefaultInstance()) { startFrom_ = - StreamContinuationTokens.newBuilder((StreamContinuationTokens) startFrom_) + com.google.bigtable.v2.StreamContinuationTokens.newBuilder( + (com.google.bigtable.v2.StreamContinuationTokens) startFrom_) .mergeFrom(value) .buildPartial(); } else { @@ -1981,7 +2022,7 @@ public Builder clearContinuationTokens() { * * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; */ - public StreamContinuationTokens.Builder getContinuationTokensBuilder() { + public com.google.bigtable.v2.StreamContinuationTokens.Builder getContinuationTokensBuilder() { return getContinuationTokensFieldBuilder().getBuilder(); } /** @@ -2001,15 +2042,16 @@ public StreamContinuationTokens.Builder getContinuationTokensBuilder() { * * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; */ - @Override - public StreamContinuationTokensOrBuilder getContinuationTokensOrBuilder() { + @java.lang.Override + public com.google.bigtable.v2.StreamContinuationTokensOrBuilder + getContinuationTokensOrBuilder() { if ((startFromCase_ == 6) && (continuationTokensBuilder_ != null)) { return continuationTokensBuilder_.getMessageOrBuilder(); } else { if (startFromCase_ == 6) { - return (StreamContinuationTokens) startFrom_; + return (com.google.bigtable.v2.StreamContinuationTokens) startFrom_; } - return StreamContinuationTokens.getDefaultInstance(); + return com.google.bigtable.v2.StreamContinuationTokens.getDefaultInstance(); } } /** @@ -2030,20 +2072,22 @@ public StreamContinuationTokensOrBuilder getContinuationTokensOrBuilder() { * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; */ private com.google.protobuf.SingleFieldBuilderV3< - StreamContinuationTokens, - StreamContinuationTokens.Builder, - 
StreamContinuationTokensOrBuilder> + com.google.bigtable.v2.StreamContinuationTokens, + com.google.bigtable.v2.StreamContinuationTokens.Builder, + com.google.bigtable.v2.StreamContinuationTokensOrBuilder> getContinuationTokensFieldBuilder() { if (continuationTokensBuilder_ == null) { if (!(startFromCase_ == 6)) { - startFrom_ = StreamContinuationTokens.getDefaultInstance(); + startFrom_ = com.google.bigtable.v2.StreamContinuationTokens.getDefaultInstance(); } continuationTokensBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< - StreamContinuationTokens, - StreamContinuationTokens.Builder, - StreamContinuationTokensOrBuilder>( - (StreamContinuationTokens) startFrom_, getParentForChildren(), isClean()); + com.google.bigtable.v2.StreamContinuationTokens, + com.google.bigtable.v2.StreamContinuationTokens.Builder, + com.google.bigtable.v2.StreamContinuationTokensOrBuilder>( + (com.google.bigtable.v2.StreamContinuationTokens) startFrom_, + getParentForChildren(), + isClean()); startFrom_ = null; } startFromCase_ = 6; @@ -2443,12 +2487,12 @@ public com.google.protobuf.DurationOrBuilder getHeartbeatDurationOrBuilder() { return heartbeatDurationBuilder_; } - @Override + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } - @Override + @java.lang.Override public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); @@ -2458,19 +2502,19 @@ public final Builder mergeUnknownFields( } // @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadChangeStreamRequest) - private static final ReadChangeStreamRequest DEFAULT_INSTANCE; + private static final com.google.bigtable.v2.ReadChangeStreamRequest DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new ReadChangeStreamRequest(); + DEFAULT_INSTANCE = new com.google.bigtable.v2.ReadChangeStreamRequest(); } - public static 
ReadChangeStreamRequest getDefaultInstance() { + public static com.google.bigtable.v2.ReadChangeStreamRequest getDefaultInstance() { return DEFAULT_INSTANCE; } private static final com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { - @Override + @java.lang.Override public ReadChangeStreamRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -2483,13 +2527,13 @@ public static com.google.protobuf.Parser parser() { return PARSER; } - @Override + @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } - @Override - public ReadChangeStreamRequest getDefaultInstanceForType() { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamRequest getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamRequestOrBuilder.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamRequestOrBuilder.java index c62293965a..db5b8e7957 100644 --- a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamRequestOrBuilder.java +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamRequestOrBuilder.java @@ -39,7 +39,7 @@ public interface ReadChangeStreamRequestOrBuilder * * @return The tableName. */ - String getTableName(); + java.lang.String getTableName(); /** * * @@ -71,7 +71,7 @@ public interface ReadChangeStreamRequestOrBuilder * * @return The appProfileId. */ - String getAppProfileId(); + java.lang.String getAppProfileId(); /** * * @@ -110,7 +110,7 @@ public interface ReadChangeStreamRequestOrBuilder * * @return The partition. 
*/ - StreamPartition getPartition(); + com.google.bigtable.v2.StreamPartition getPartition(); /** * * @@ -120,7 +120,7 @@ public interface ReadChangeStreamRequestOrBuilder * * .google.bigtable.v2.StreamPartition partition = 3; */ - StreamPartitionOrBuilder getPartitionOrBuilder(); + com.google.bigtable.v2.StreamPartitionOrBuilder getPartitionOrBuilder(); /** * @@ -205,7 +205,7 @@ public interface ReadChangeStreamRequestOrBuilder * * @return The continuationTokens. */ - StreamContinuationTokens getContinuationTokens(); + com.google.bigtable.v2.StreamContinuationTokens getContinuationTokens(); /** * * @@ -223,7 +223,7 @@ public interface ReadChangeStreamRequestOrBuilder * * .google.bigtable.v2.StreamContinuationTokens continuation_tokens = 6; */ - StreamContinuationTokensOrBuilder getContinuationTokensOrBuilder(); + com.google.bigtable.v2.StreamContinuationTokensOrBuilder getContinuationTokensOrBuilder(); /** * @@ -304,5 +304,5 @@ public interface ReadChangeStreamRequestOrBuilder */ com.google.protobuf.DurationOrBuilder getHeartbeatDurationOrBuilder(); - public ReadChangeStreamRequest.StartFromCase getStartFromCase(); + public com.google.bigtable.v2.ReadChangeStreamRequest.StartFromCase getStartFromCase(); } diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamResponse.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamResponse.java index 757cebfca6..98acce8f22 100644 --- a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamResponse.java +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamResponse.java @@ -40,13 +40,13 @@ private ReadChangeStreamResponse(com.google.protobuf.GeneratedMessageV3.Builder< private ReadChangeStreamResponse() {} - @Override + @java.lang.Override @SuppressWarnings({"unused"}) - protected Object newInstance(UnusedPrivateParameter unused) { + protected java.lang.Object 
newInstance(UnusedPrivateParameter unused) { return new ReadChangeStreamResponse(); } - @Override + @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } @@ -57,7 +57,7 @@ private ReadChangeStreamResponse( throws com.google.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { - throw new NullPointerException(); + throw new java.lang.NullPointerException(); } com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -71,13 +71,19 @@ private ReadChangeStreamResponse( break; case 10: { - DataChange.Builder subBuilder = null; + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Builder subBuilder = null; if (streamRecordCase_ == 1) { - subBuilder = ((DataChange) streamRecord_).toBuilder(); + subBuilder = + ((com.google.bigtable.v2.ReadChangeStreamResponse.DataChange) streamRecord_) + .toBuilder(); } - streamRecord_ = input.readMessage(DataChange.parser(), extensionRegistry); + streamRecord_ = + input.readMessage( + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.parser(), + extensionRegistry); if (subBuilder != null) { - subBuilder.mergeFrom((DataChange) streamRecord_); + subBuilder.mergeFrom( + (com.google.bigtable.v2.ReadChangeStreamResponse.DataChange) streamRecord_); streamRecord_ = subBuilder.buildPartial(); } streamRecordCase_ = 1; @@ -85,13 +91,19 @@ private ReadChangeStreamResponse( } case 18: { - Heartbeat.Builder subBuilder = null; + com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.Builder subBuilder = null; if (streamRecordCase_ == 2) { - subBuilder = ((Heartbeat) streamRecord_).toBuilder(); + subBuilder = + ((com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat) streamRecord_) + .toBuilder(); } - streamRecord_ = input.readMessage(Heartbeat.parser(), extensionRegistry); + streamRecord_ = + input.readMessage( + com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.parser(), + 
extensionRegistry); if (subBuilder != null) { - subBuilder.mergeFrom((Heartbeat) streamRecord_); + subBuilder.mergeFrom( + (com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat) streamRecord_); streamRecord_ = subBuilder.buildPartial(); } streamRecordCase_ = 2; @@ -99,13 +111,19 @@ private ReadChangeStreamResponse( } case 26: { - CloseStream.Builder subBuilder = null; + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream.Builder subBuilder = null; if (streamRecordCase_ == 3) { - subBuilder = ((CloseStream) streamRecord_).toBuilder(); + subBuilder = + ((com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream) streamRecord_) + .toBuilder(); } - streamRecord_ = input.readMessage(CloseStream.parser(), extensionRegistry); + streamRecord_ = + input.readMessage( + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream.parser(), + extensionRegistry); if (subBuilder != null) { - subBuilder.mergeFrom((CloseStream) streamRecord_); + subBuilder.mergeFrom( + (com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream) streamRecord_); streamRecord_ = subBuilder.buildPartial(); } streamRecordCase_ = 3; @@ -133,14 +151,18 @@ private ReadChangeStreamResponse( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return BigtableProto.internal_static_google_bigtable_v2_ReadChangeStreamResponse_descriptor; + return com.google.bigtable.v2.BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_descriptor; } - @Override - protected FieldAccessorTable internalGetFieldAccessorTable() { - return BigtableProto + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized(ReadChangeStreamResponse.class, Builder.class); + .ensureFieldAccessorsInitialized( + 
com.google.bigtable.v2.ReadChangeStreamResponse.class, + com.google.bigtable.v2.ReadChangeStreamResponse.Builder.class); } public interface MutationChunkOrBuilder @@ -175,7 +197,7 @@ public interface MutationChunkOrBuilder * * @return The chunkInfo. */ - MutationChunk.ChunkInfo getChunkInfo(); + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo getChunkInfo(); /** * * @@ -187,7 +209,8 @@ public interface MutationChunkOrBuilder * .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunk_info = 1; * */ - MutationChunk.ChunkInfoOrBuilder getChunkInfoOrBuilder(); + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfoOrBuilder + getChunkInfoOrBuilder(); /** * @@ -216,7 +239,7 @@ public interface MutationChunkOrBuilder * * @return The mutation. */ - Mutation getMutation(); + com.google.bigtable.v2.Mutation getMutation(); /** * * @@ -228,7 +251,7 @@ public interface MutationChunkOrBuilder * * .google.bigtable.v2.Mutation mutation = 2; */ - MutationOrBuilder getMutationOrBuilder(); + com.google.bigtable.v2.MutationOrBuilder getMutationOrBuilder(); } /** * @@ -251,13 +274,13 @@ private MutationChunk(com.google.protobuf.GeneratedMessageV3.Builder builder) private MutationChunk() {} - @Override + @java.lang.Override @SuppressWarnings({"unused"}) - protected Object newInstance(UnusedPrivateParameter unused) { + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { return new MutationChunk(); } - @Override + @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } @@ -268,7 +291,7 @@ private MutationChunk( throws com.google.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { - throw new NullPointerException(); + throw new java.lang.NullPointerException(); } com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -282,11 +305,16 @@ private MutationChunk( 
break; case 10: { - ChunkInfo.Builder subBuilder = null; + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo.Builder + subBuilder = null; if (chunkInfo_ != null) { subBuilder = chunkInfo_.toBuilder(); } - chunkInfo_ = input.readMessage(ChunkInfo.parser(), extensionRegistry); + chunkInfo_ = + input.readMessage( + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + .parser(), + extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(chunkInfo_); chunkInfo_ = subBuilder.buildPartial(); @@ -296,11 +324,12 @@ private MutationChunk( } case 18: { - Mutation.Builder subBuilder = null; + com.google.bigtable.v2.Mutation.Builder subBuilder = null; if (mutation_ != null) { subBuilder = mutation_.toBuilder(); } - mutation_ = input.readMessage(Mutation.parser(), extensionRegistry); + mutation_ = + input.readMessage(com.google.bigtable.v2.Mutation.parser(), extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(mutation_); mutation_ = subBuilder.buildPartial(); @@ -330,15 +359,18 @@ private MutationChunk( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return BigtableProto + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_descriptor; } - @Override - protected FieldAccessorTable internalGetFieldAccessorTable() { - return BigtableProto + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_fieldAccessorTable - .ensureFieldAccessorsInitialized(MutationChunk.class, Builder.class); + .ensureFieldAccessorsInitialized( + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.class, + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.Builder.class); } public interface ChunkInfoOrBuilder @@ -409,13 +441,13 
@@ private ChunkInfo(com.google.protobuf.GeneratedMessageV3.Builder builder) { private ChunkInfo() {} - @Override + @java.lang.Override @SuppressWarnings({"unused"}) - protected Object newInstance(UnusedPrivateParameter unused) { + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { return new ChunkInfo(); } - @Override + @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } @@ -426,7 +458,7 @@ private ChunkInfo( throws com.google.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { - throw new NullPointerException(); + throw new java.lang.NullPointerException(); } com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -476,15 +508,19 @@ private ChunkInfo( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return BigtableProto + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_ChunkInfo_descriptor; } - @Override - protected FieldAccessorTable internalGetFieldAccessorTable() { - return BigtableProto + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_ChunkInfo_fieldAccessorTable - .ensureFieldAccessorsInitialized(ChunkInfo.class, Builder.class); + .ensureFieldAccessorsInitialized( + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo.class, + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo.Builder + .class); } public static final int CHUNKED_VALUE_SIZE_FIELD_NUMBER = 1; @@ -500,7 +536,7 @@ protected FieldAccessorTable internalGetFieldAccessorTable() { * * @return The chunkedValueSize. 
*/ - @Override + @java.lang.Override public int getChunkedValueSize() { return chunkedValueSize_; } @@ -519,7 +555,7 @@ public int getChunkedValueSize() { * * @return The chunkedValueOffset. */ - @Override + @java.lang.Override public int getChunkedValueOffset() { return chunkedValueOffset_; } @@ -537,14 +573,14 @@ public int getChunkedValueOffset() { * * @return The lastChunk. */ - @Override + @java.lang.Override public boolean getLastChunk() { return lastChunk_; } private byte memoizedIsInitialized = -1; - @Override + @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; @@ -554,7 +590,7 @@ public final boolean isInitialized() { return true; } - @Override + @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (chunkedValueSize_ != 0) { output.writeInt32(1, chunkedValueSize_); @@ -568,7 +604,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io unknownFields.writeTo(output); } - @Override + @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; @@ -588,15 +624,17 @@ public int getSerializedSize() { return size; } - @Override - public boolean equals(final Object obj) { + @java.lang.Override + public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof ChunkInfo)) { + if (!(obj + instanceof com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo)) { return super.equals(obj); } - ChunkInfo other = (ChunkInfo) obj; + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo other = + (com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo) obj; if (getChunkedValueSize() != other.getChunkedValueSize()) return false; if (getChunkedValueOffset() != other.getChunkedValueOffset()) return false; @@ -605,7 +643,7 @@ public boolean equals(final Object 
obj) { return true; } - @Override + @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; @@ -623,77 +661,87 @@ public int hashCode() { return hash; } - public static ChunkInfo parseFrom(java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static ChunkInfo parseFrom( - java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static ChunkInfo parseFrom(com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static ChunkInfo parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static ChunkInfo parseFrom(byte[] data) - throws 
com.google.protobuf.InvalidProtocolBufferException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static ChunkInfo parseFrom( - byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static ChunkInfo parseFrom(java.io.InputStream input) throws java.io.IOException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + parseFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } - public static ChunkInfo parseFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } - public static ChunkInfo parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); } - public static ChunkInfo parseDelimitedFrom( - java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( PARSER, input, extensionRegistry); } - public static ChunkInfo parseFrom(com.google.protobuf.CodedInputStream input) - throws java.io.IOException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } - public static ChunkInfo parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } - @Override + @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } @@ -702,17 +750,19 @@ public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - public static Builder newBuilder(ChunkInfo prototype) { + public static Builder newBuilder( + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } - @Override + @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } - @Override - protected Builder newBuilderForType(BuilderParent parent) { + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -731,17 +781,21 @@ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo) - ChunkInfoOrBuilder { + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return BigtableProto + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_ChunkInfo_descriptor; } - @Override - protected FieldAccessorTable internalGetFieldAccessorTable() { - return BigtableProto + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_ChunkInfo_fieldAccessorTable - .ensureFieldAccessorsInitialized(ChunkInfo.class, Builder.class); + .ensureFieldAccessorsInitialized( + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo.class, + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo.Builder + .class); } // Construct using @@ -750,7 +804,7 @@ private Builder() { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -759,7 +813,7 @@ private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} } - @Override + 
@java.lang.Override public Builder clear() { super.clear(); chunkedValueSize_ = 0; @@ -771,29 +825,34 @@ public Builder clear() { return this; } - @Override + @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return BigtableProto + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_ChunkInfo_descriptor; } - @Override - public ChunkInfo getDefaultInstanceForType() { - return ChunkInfo.getDefaultInstance(); + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + getDefaultInstanceForType() { + return com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + .getDefaultInstance(); } - @Override - public ChunkInfo build() { - ChunkInfo result = buildPartial(); + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo build() { + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo result = + buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - @Override - public ChunkInfo buildPartial() { - ChunkInfo result = new ChunkInfo(this); + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + buildPartial() { + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo result = + new com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo(this); result.chunkedValueSize_ = chunkedValueSize_; result.chunkedValueOffset_ = chunkedValueOffset_; result.lastChunk_ = lastChunk_; @@ -801,51 +860,58 @@ public ChunkInfo buildPartial() { return result; } - @Override + @java.lang.Override public Builder clone() { return super.clone(); } - @Override + @java.lang.Override public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + 
com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } - @Override + @java.lang.Override public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } - @Override + @java.lang.Override public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } - @Override + @java.lang.Override public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { return super.setRepeatedField(field, index, value); } - @Override + @java.lang.Override public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } - @Override + @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof ChunkInfo) { - return mergeFrom((ChunkInfo) other); + if (other + instanceof com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo) { + return mergeFrom( + (com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo) other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(ChunkInfo other) { - if (other == ChunkInfo.getDefaultInstance()) return this; + public Builder mergeFrom( + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo other) { + if (other + == com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + .getDefaultInstance()) return this; if (other.getChunkedValueSize() != 0) { setChunkedValueSize(other.getChunkedValueSize()); } @@ -860,21 +926,24 @@ public Builder mergeFrom(ChunkInfo other) { return this; } - @Override + @java.lang.Override public final boolean 
isInitialized() { return true; } - @Override + @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - ChunkInfo parsedMessage = null; + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo parsedMessage = + null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (ChunkInfo) e.getUnfinishedMessage(); + parsedMessage = + (com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo) + e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -896,7 +965,7 @@ public Builder mergeFrom( * * @return The chunkedValueSize. */ - @Override + @java.lang.Override public int getChunkedValueSize() { return chunkedValueSize_; } @@ -949,7 +1018,7 @@ public Builder clearChunkedValueSize() { * * @return The chunkedValueOffset. */ - @Override + @java.lang.Override public int getChunkedValueOffset() { return chunkedValueOffset_; } @@ -1003,7 +1072,7 @@ public Builder clearChunkedValueOffset() { * * @return The lastChunk. 
*/ - @Override + @java.lang.Override public boolean getLastChunk() { return lastChunk_; } @@ -1043,13 +1112,13 @@ public Builder clearLastChunk() { return this; } - @Override + @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } - @Override + @java.lang.Override public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); @@ -1059,19 +1128,22 @@ public final Builder mergeUnknownFields( } // @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo) - private static final ChunkInfo DEFAULT_INSTANCE; + private static final com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new ChunkInfo(); + DEFAULT_INSTANCE = + new com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo(); } - public static ChunkInfo getDefaultInstance() { + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + getDefaultInstance() { return DEFAULT_INSTANCE; } private static final com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { - @Override + @java.lang.Override public ChunkInfo parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -1084,19 +1156,20 @@ public static com.google.protobuf.Parser parser() { return PARSER; } - @Override + @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } - @Override - public ChunkInfo getDefaultInstanceForType() { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public static final int CHUNK_INFO_FIELD_NUMBER = 1; - private ChunkInfo chunkInfo_; + private 
com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunkInfo_; /** * * @@ -1110,7 +1183,7 @@ public ChunkInfo getDefaultInstanceForType() { * * @return Whether the chunkInfo field is set. */ - @Override + @java.lang.Override public boolean hasChunkInfo() { return chunkInfo_ != null; } @@ -1127,9 +1200,12 @@ public boolean hasChunkInfo() { * * @return The chunkInfo. */ - @Override - public ChunkInfo getChunkInfo() { - return chunkInfo_ == null ? ChunkInfo.getDefaultInstance() : chunkInfo_; + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo getChunkInfo() { + return chunkInfo_ == null + ? com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + .getDefaultInstance() + : chunkInfo_; } /** * @@ -1142,13 +1218,14 @@ public ChunkInfo getChunkInfo() { * .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunk_info = 1; * */ - @Override - public ChunkInfoOrBuilder getChunkInfoOrBuilder() { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfoOrBuilder + getChunkInfoOrBuilder() { return getChunkInfo(); } public static final int MUTATION_FIELD_NUMBER = 2; - private Mutation mutation_; + private com.google.bigtable.v2.Mutation mutation_; /** * * @@ -1162,7 +1239,7 @@ public ChunkInfoOrBuilder getChunkInfoOrBuilder() { * * @return Whether the mutation field is set. */ - @Override + @java.lang.Override public boolean hasMutation() { return mutation_ != null; } @@ -1179,9 +1256,9 @@ public boolean hasMutation() { * * @return The mutation. */ - @Override - public Mutation getMutation() { - return mutation_ == null ? Mutation.getDefaultInstance() : mutation_; + @java.lang.Override + public com.google.bigtable.v2.Mutation getMutation() { + return mutation_ == null ? 
com.google.bigtable.v2.Mutation.getDefaultInstance() : mutation_; } /** * @@ -1194,14 +1271,14 @@ public Mutation getMutation() { * * .google.bigtable.v2.Mutation mutation = 2; */ - @Override - public MutationOrBuilder getMutationOrBuilder() { + @java.lang.Override + public com.google.bigtable.v2.MutationOrBuilder getMutationOrBuilder() { return getMutation(); } private byte memoizedIsInitialized = -1; - @Override + @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; @@ -1211,7 +1288,7 @@ public final boolean isInitialized() { return true; } - @Override + @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (chunkInfo_ != null) { output.writeMessage(1, getChunkInfo()); @@ -1222,7 +1299,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io unknownFields.writeTo(output); } - @Override + @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; @@ -1239,15 +1316,16 @@ public int getSerializedSize() { return size; } - @Override - public boolean equals(final Object obj) { + @java.lang.Override + public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof MutationChunk)) { + if (!(obj instanceof com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk)) { return super.equals(obj); } - MutationChunk other = (MutationChunk) obj; + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk other = + (com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk) obj; if (hasChunkInfo() != other.hasChunkInfo()) return false; if (hasChunkInfo()) { @@ -1261,7 +1339,7 @@ public boolean equals(final Object obj) { return true; } - @Override + @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; @@ -1281,69 +1359,71 @@ public int hashCode() { return 
hash; } - public static MutationChunk parseFrom(java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static MutationChunk parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static MutationChunk parseFrom(com.google.protobuf.ByteString data) + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk parseFrom( + com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static MutationChunk parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static MutationChunk parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static MutationChunk parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static MutationChunk 
parseFrom(java.io.InputStream input) throws java.io.IOException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk parseFrom( + java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } - public static MutationChunk parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } - public static MutationChunk parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); } - public static MutationChunk parseDelimitedFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( PARSER, input, extensionRegistry); } - public static MutationChunk parseFrom(com.google.protobuf.CodedInputStream input) - throws java.io.IOException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } - public static MutationChunk parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
java.io.IOException { @@ -1351,7 +1431,7 @@ public static MutationChunk parseFrom( PARSER, input, extensionRegistry); } - @Override + @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } @@ -1360,17 +1440,19 @@ public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - public static Builder newBuilder(MutationChunk prototype) { + public static Builder newBuilder( + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } - @Override + @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } - @Override - protected Builder newBuilderForType(BuilderParent parent) { + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -1387,17 +1469,20 @@ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:google.bigtable.v2.ReadChangeStreamResponse.MutationChunk) - MutationChunkOrBuilder { + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunkOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return BigtableProto + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_descriptor; } - @Override - protected FieldAccessorTable internalGetFieldAccessorTable() { - return BigtableProto + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_fieldAccessorTable - .ensureFieldAccessorsInitialized(MutationChunk.class, Builder.class); + .ensureFieldAccessorsInitialized( 
+ com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.class, + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.Builder.class); } // Construct using com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.newBuilder() @@ -1405,7 +1490,7 @@ private Builder() { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -1414,7 +1499,7 @@ private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} } - @Override + @java.lang.Override public Builder clear() { super.clear(); if (chunkInfoBuilder_ == null) { @@ -1432,29 +1517,31 @@ public Builder clear() { return this; } - @Override + @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return BigtableProto + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_MutationChunk_descriptor; } - @Override - public MutationChunk getDefaultInstanceForType() { - return MutationChunk.getDefaultInstance(); + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk + getDefaultInstanceForType() { + return com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.getDefaultInstance(); } - @Override - public MutationChunk build() { - MutationChunk result = buildPartial(); + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk build() { + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - @Override - public MutationChunk buildPartial() { - MutationChunk result = new MutationChunk(this); + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk buildPartial() { 
+ com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk result = + new com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk(this); if (chunkInfoBuilder_ == null) { result.chunkInfo_ = chunkInfo_; } else { @@ -1469,50 +1556,56 @@ public MutationChunk buildPartial() { return result; } - @Override + @java.lang.Override public Builder clone() { return super.clone(); } - @Override - public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } - @Override + @java.lang.Override public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } - @Override + @java.lang.Override public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } - @Override + @java.lang.Override public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { return super.setRepeatedField(field, index, value); } - @Override + @java.lang.Override public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } - @Override + @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof MutationChunk) { - return mergeFrom((MutationChunk) other); + if (other instanceof com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk) { + return mergeFrom((com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk) other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(MutationChunk other) { - 
if (other == MutationChunk.getDefaultInstance()) return this; + public Builder mergeFrom( + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk other) { + if (other + == com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.getDefaultInstance()) + return this; if (other.hasChunkInfo()) { mergeChunkInfo(other.getChunkInfo()); } @@ -1524,21 +1617,23 @@ public Builder mergeFrom(MutationChunk other) { return this; } - @Override + @java.lang.Override public final boolean isInitialized() { return true; } - @Override + @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - MutationChunk parsedMessage = null; + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (MutationChunk) e.getUnfinishedMessage(); + parsedMessage = + (com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk) + e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -1548,9 +1643,11 @@ public Builder mergeFrom( return this; } - private ChunkInfo chunkInfo_; + private com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunkInfo_; private com.google.protobuf.SingleFieldBuilderV3< - ChunkInfo, ChunkInfo.Builder, ChunkInfoOrBuilder> + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo, + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo.Builder, + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfoOrBuilder> chunkInfoBuilder_; /** * @@ -1581,9 +1678,13 @@ public boolean hasChunkInfo() { * * @return The chunkInfo. 
*/ - public ChunkInfo getChunkInfo() { + public com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + getChunkInfo() { if (chunkInfoBuilder_ == null) { - return chunkInfo_ == null ? ChunkInfo.getDefaultInstance() : chunkInfo_; + return chunkInfo_ == null + ? com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + .getDefaultInstance() + : chunkInfo_; } else { return chunkInfoBuilder_.getMessage(); } @@ -1599,7 +1700,8 @@ public ChunkInfo getChunkInfo() { * .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunk_info = 1; * */ - public Builder setChunkInfo(ChunkInfo value) { + public Builder setChunkInfo( + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo value) { if (chunkInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -1623,7 +1725,9 @@ public Builder setChunkInfo(ChunkInfo value) { * .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunk_info = 1; * */ - public Builder setChunkInfo(ChunkInfo.Builder builderForValue) { + public Builder setChunkInfo( + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo.Builder + builderForValue) { if (chunkInfoBuilder_ == null) { chunkInfo_ = builderForValue.build(); onChanged(); @@ -1644,10 +1748,15 @@ public Builder setChunkInfo(ChunkInfo.Builder builderForValue) { * .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunk_info = 1; * */ - public Builder mergeChunkInfo(ChunkInfo value) { + public Builder mergeChunkInfo( + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo value) { if (chunkInfoBuilder_ == null) { if (chunkInfo_ != null) { - chunkInfo_ = ChunkInfo.newBuilder(chunkInfo_).mergeFrom(value).buildPartial(); + chunkInfo_ = + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo.newBuilder( + chunkInfo_) + .mergeFrom(value) + .buildPartial(); } else { chunkInfo_ = value; } @@ -1691,7 +1800,8 @@ public Builder 
clearChunkInfo() { * .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunk_info = 1; * */ - public ChunkInfo.Builder getChunkInfoBuilder() { + public com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo.Builder + getChunkInfoBuilder() { onChanged(); return getChunkInfoFieldBuilder().getBuilder(); @@ -1707,11 +1817,15 @@ public ChunkInfo.Builder getChunkInfoBuilder() { * .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo chunk_info = 1; * */ - public ChunkInfoOrBuilder getChunkInfoOrBuilder() { + public com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfoOrBuilder + getChunkInfoOrBuilder() { if (chunkInfoBuilder_ != null) { return chunkInfoBuilder_.getMessageOrBuilder(); } else { - return chunkInfo_ == null ? ChunkInfo.getDefaultInstance() : chunkInfo_; + return chunkInfo_ == null + ? com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + .getDefaultInstance() + : chunkInfo_; } } /** @@ -1726,21 +1840,27 @@ public ChunkInfoOrBuilder getChunkInfoOrBuilder() { *
    */ private com.google.protobuf.SingleFieldBuilderV3< - ChunkInfo, ChunkInfo.Builder, ChunkInfoOrBuilder> + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo, + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo.Builder, + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfoOrBuilder> getChunkInfoFieldBuilder() { if (chunkInfoBuilder_ == null) { chunkInfoBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< - ChunkInfo, ChunkInfo.Builder, ChunkInfoOrBuilder>( + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo, + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo.Builder, + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfoOrBuilder>( getChunkInfo(), getParentForChildren(), isClean()); chunkInfo_ = null; } return chunkInfoBuilder_; } - private Mutation mutation_; + private com.google.bigtable.v2.Mutation mutation_; private com.google.protobuf.SingleFieldBuilderV3< - Mutation, Mutation.Builder, MutationOrBuilder> + com.google.bigtable.v2.Mutation, + com.google.bigtable.v2.Mutation.Builder, + com.google.bigtable.v2.MutationOrBuilder> mutationBuilder_; /** * @@ -1771,9 +1891,11 @@ public boolean hasMutation() { * * @return The mutation. */ - public Mutation getMutation() { + public com.google.bigtable.v2.Mutation getMutation() { if (mutationBuilder_ == null) { - return mutation_ == null ? Mutation.getDefaultInstance() : mutation_; + return mutation_ == null + ? 
com.google.bigtable.v2.Mutation.getDefaultInstance() + : mutation_; } else { return mutationBuilder_.getMessage(); } @@ -1789,7 +1911,7 @@ public Mutation getMutation() { * * .google.bigtable.v2.Mutation mutation = 2; */ - public Builder setMutation(Mutation value) { + public Builder setMutation(com.google.bigtable.v2.Mutation value) { if (mutationBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -1813,7 +1935,7 @@ public Builder setMutation(Mutation value) { * * .google.bigtable.v2.Mutation mutation = 2; */ - public Builder setMutation(Mutation.Builder builderForValue) { + public Builder setMutation(com.google.bigtable.v2.Mutation.Builder builderForValue) { if (mutationBuilder_ == null) { mutation_ = builderForValue.build(); onChanged(); @@ -1834,10 +1956,13 @@ public Builder setMutation(Mutation.Builder builderForValue) { * * .google.bigtable.v2.Mutation mutation = 2; */ - public Builder mergeMutation(Mutation value) { + public Builder mergeMutation(com.google.bigtable.v2.Mutation value) { if (mutationBuilder_ == null) { if (mutation_ != null) { - mutation_ = Mutation.newBuilder(mutation_).mergeFrom(value).buildPartial(); + mutation_ = + com.google.bigtable.v2.Mutation.newBuilder(mutation_) + .mergeFrom(value) + .buildPartial(); } else { mutation_ = value; } @@ -1881,7 +2006,7 @@ public Builder clearMutation() { * * .google.bigtable.v2.Mutation mutation = 2; */ - public Mutation.Builder getMutationBuilder() { + public com.google.bigtable.v2.Mutation.Builder getMutationBuilder() { onChanged(); return getMutationFieldBuilder().getBuilder(); @@ -1897,11 +2022,13 @@ public Mutation.Builder getMutationBuilder() { * * .google.bigtable.v2.Mutation mutation = 2; */ - public MutationOrBuilder getMutationOrBuilder() { + public com.google.bigtable.v2.MutationOrBuilder getMutationOrBuilder() { if (mutationBuilder_ != null) { return mutationBuilder_.getMessageOrBuilder(); } else { - return mutation_ == null ? 
Mutation.getDefaultInstance() : mutation_; + return mutation_ == null + ? com.google.bigtable.v2.Mutation.getDefaultInstance() + : mutation_; } } /** @@ -1916,25 +2043,29 @@ public MutationOrBuilder getMutationOrBuilder() { * .google.bigtable.v2.Mutation mutation = 2; */ private com.google.protobuf.SingleFieldBuilderV3< - Mutation, Mutation.Builder, MutationOrBuilder> + com.google.bigtable.v2.Mutation, + com.google.bigtable.v2.Mutation.Builder, + com.google.bigtable.v2.MutationOrBuilder> getMutationFieldBuilder() { if (mutationBuilder_ == null) { mutationBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< - Mutation, Mutation.Builder, MutationOrBuilder>( + com.google.bigtable.v2.Mutation, + com.google.bigtable.v2.Mutation.Builder, + com.google.bigtable.v2.MutationOrBuilder>( getMutation(), getParentForChildren(), isClean()); mutation_ = null; } return mutationBuilder_; } - @Override + @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } - @Override + @java.lang.Override public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); @@ -1944,19 +2075,21 @@ public final Builder mergeUnknownFields( } // @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadChangeStreamResponse.MutationChunk) - private static final MutationChunk DEFAULT_INSTANCE; + private static final com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk + DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new MutationChunk(); + DEFAULT_INSTANCE = new com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk(); } - public static MutationChunk getDefaultInstance() { + public static com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk + getDefaultInstance() { return DEFAULT_INSTANCE; } private static final com.google.protobuf.Parser PARSER = new 
com.google.protobuf.AbstractParser() { - @Override + @java.lang.Override public MutationChunk parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -1969,13 +2102,14 @@ public static com.google.protobuf.Parser parser() { return PARSER; } - @Override + @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } - @Override - public MutationChunk getDefaultInstanceForType() { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk + getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } @@ -2008,7 +2142,7 @@ public interface DataChangeOrBuilder * * @return The type. */ - DataChange.Type getType(); + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type getType(); /** * @@ -2022,7 +2156,7 @@ public interface DataChangeOrBuilder * * @return The sourceClusterId. */ - String getSourceClusterId(); + java.lang.String getSourceClusterId(); /** * * @@ -2117,7 +2251,7 @@ public interface DataChangeOrBuilder * * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; */ - java.util.List getChunksList(); + java.util.List getChunksList(); /** * * @@ -2129,7 +2263,7 @@ public interface DataChangeOrBuilder * * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; */ - MutationChunk getChunks(int index); + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk getChunks(int index); /** * * @@ -2153,7 +2287,8 @@ public interface DataChangeOrBuilder * * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; */ - java.util.List getChunksOrBuilderList(); + java.util.List + getChunksOrBuilderList(); /** * * @@ -2165,7 +2300,8 @@ public interface DataChangeOrBuilder * * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; */ - MutationChunkOrBuilder getChunksOrBuilder(int index); + 
com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunkOrBuilder getChunksOrBuilder( + int index); /** * @@ -2193,7 +2329,7 @@ public interface DataChangeOrBuilder * * @return The token. */ - String getToken(); + java.lang.String getToken(); /** * * @@ -2281,13 +2417,13 @@ private DataChange() { token_ = ""; } - @Override + @java.lang.Override @SuppressWarnings({"unused"}) - protected Object newInstance(UnusedPrivateParameter unused) { + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { return new DataChange(); } - @Override + @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } @@ -2298,7 +2434,7 @@ private DataChange( throws com.google.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { - throw new NullPointerException(); + throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = @@ -2320,7 +2456,7 @@ private DataChange( } case 18: { - String s = input.readStringRequireUtf8(); + java.lang.String s = input.readStringRequireUtf8(); sourceClusterId_ = s; break; @@ -2353,10 +2489,15 @@ private DataChange( case 50: { if (!((mutable_bitField0_ & 0x00000001) != 0)) { - chunks_ = new java.util.ArrayList(); + chunks_ = + new java.util.ArrayList< + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk>(); mutable_bitField0_ |= 0x00000001; } - chunks_.add(input.readMessage(MutationChunk.parser(), extensionRegistry)); + chunks_.add( + input.readMessage( + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.parser(), + extensionRegistry)); break; } case 64: @@ -2366,7 +2507,7 @@ private DataChange( } case 74: { - String s = input.readStringRequireUtf8(); + java.lang.String s = input.readStringRequireUtf8(); token_ = s; break; @@ -2411,15 +2552,18 @@ private DataChange( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return 
BigtableProto + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_DataChange_descriptor; } - @Override - protected FieldAccessorTable internalGetFieldAccessorTable() { - return BigtableProto + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_DataChange_fieldAccessorTable - .ensureFieldAccessorsInitialized(DataChange.class, Builder.class); + .ensureFieldAccessorsInitialized( + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.class, + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Builder.class); } /** @@ -2520,7 +2664,8 @@ public enum Type implements com.google.protobuf.ProtocolMessageEnum { public final int getNumber() { if (this == UNRECOGNIZED) { - throw new IllegalArgumentException("Can't get the number of an unknown enum value."); + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); } return value; } @@ -2530,7 +2675,7 @@ public final int getNumber() { * @return The enum associated with the given numeric wire value. * @deprecated Use {@link #forNumber(int)} instead. 
*/ - @Deprecated + @java.lang.Deprecated public static Type valueOf(int value) { return forNumber(value); } @@ -2567,7 +2712,7 @@ public Type findValueByNumber(int number) { public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { if (this == UNRECOGNIZED) { - throw new IllegalStateException( + throw new java.lang.IllegalStateException( "Can't get the descriptor of an unrecognized enum value."); } return getDescriptor().getValues().get(ordinal()); @@ -2578,14 +2723,16 @@ public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { - return DataChange.getDescriptor().getEnumTypes().get(0); + return com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.getDescriptor() + .getEnumTypes() + .get(0); } private static final Type[] VALUES = values(); public static Type valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { - throw new IllegalArgumentException("EnumValueDescriptor is not for this type."); + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); } if (desc.getIndex() == -1) { return UNRECOGNIZED; @@ -2615,7 +2762,7 @@ private Type(int value) { * * @return The enum numeric value on the wire for type. */ - @Override + @java.lang.Override public int getTypeValue() { return type_; } @@ -2630,15 +2777,18 @@ public int getTypeValue() { * * @return The type. */ - @Override - public Type getType() { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type getType() { @SuppressWarnings("deprecation") - Type result = Type.valueOf(type_); - return result == null ? Type.UNRECOGNIZED : result; + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type result = + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type.valueOf(type_); + return result == null + ? 
com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type.UNRECOGNIZED + : result; } public static final int SOURCE_CLUSTER_ID_FIELD_NUMBER = 2; - private volatile Object sourceClusterId_; + private volatile java.lang.Object sourceClusterId_; /** * * @@ -2651,14 +2801,14 @@ public Type getType() { * * @return The sourceClusterId. */ - @Override - public String getSourceClusterId() { - Object ref = sourceClusterId_; - if (ref instanceof String) { - return (String) ref; + @java.lang.Override + public java.lang.String getSourceClusterId() { + java.lang.Object ref = sourceClusterId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); + java.lang.String s = bs.toStringUtf8(); sourceClusterId_ = s; return s; } @@ -2675,12 +2825,12 @@ public String getSourceClusterId() { * * @return The bytes for sourceClusterId. */ - @Override + @java.lang.Override public com.google.protobuf.ByteString getSourceClusterIdBytes() { - Object ref = sourceClusterId_; - if (ref instanceof String) { + java.lang.Object ref = sourceClusterId_; + if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); sourceClusterId_ = b; return b; } else { @@ -2703,7 +2853,7 @@ public com.google.protobuf.ByteString getSourceClusterIdBytes() { * * @return The rowKey. */ - @Override + @java.lang.Override public com.google.protobuf.ByteString getRowKey() { return rowKey_; } @@ -2721,7 +2871,7 @@ public com.google.protobuf.ByteString getRowKey() { * * @return Whether the commitTimestamp field is set. */ - @Override + @java.lang.Override public boolean hasCommitTimestamp() { return commitTimestamp_ != null; } @@ -2736,7 +2886,7 @@ public boolean hasCommitTimestamp() { * * @return The commitTimestamp. 
*/ - @Override + @java.lang.Override public com.google.protobuf.Timestamp getCommitTimestamp() { return commitTimestamp_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() @@ -2751,7 +2901,7 @@ public com.google.protobuf.Timestamp getCommitTimestamp() { * * .google.protobuf.Timestamp commit_timestamp = 4; */ - @Override + @java.lang.Override public com.google.protobuf.TimestampOrBuilder getCommitTimestampOrBuilder() { return getCommitTimestamp(); } @@ -2775,13 +2925,13 @@ public com.google.protobuf.TimestampOrBuilder getCommitTimestampOrBuilder() { * * @return The tiebreaker. */ - @Override + @java.lang.Override public int getTiebreaker() { return tiebreaker_; } public static final int CHUNKS_FIELD_NUMBER = 6; - private java.util.List chunks_; + private java.util.List chunks_; /** * * @@ -2793,8 +2943,9 @@ public int getTiebreaker() { * * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; */ - @Override - public java.util.List getChunksList() { + @java.lang.Override + public java.util.List + getChunksList() { return chunks_; } /** @@ -2808,8 +2959,10 @@ public java.util.List getChunksList() { * * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; */ - @Override - public java.util.List getChunksOrBuilderList() { + @java.lang.Override + public java.util.List< + ? 
extends com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunkOrBuilder> + getChunksOrBuilderList() { return chunks_; } /** @@ -2823,7 +2976,7 @@ public java.util.List getChunksOrBuilderList() * * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; */ - @Override + @java.lang.Override public int getChunksCount() { return chunks_.size(); } @@ -2838,8 +2991,8 @@ public int getChunksCount() { * * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; */ - @Override - public MutationChunk getChunks(int index) { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk getChunks(int index) { return chunks_.get(index); } /** @@ -2853,8 +3006,9 @@ public MutationChunk getChunks(int index) { * * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; */ - @Override - public MutationChunkOrBuilder getChunksOrBuilder(int index) { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunkOrBuilder + getChunksOrBuilder(int index) { return chunks_.get(index); } @@ -2872,13 +3026,13 @@ public MutationChunkOrBuilder getChunksOrBuilder(int index) { * * @return The done. */ - @Override + @java.lang.Override public boolean getDone() { return done_; } public static final int TOKEN_FIELD_NUMBER = 9; - private volatile Object token_; + private volatile java.lang.Object token_; /** * * @@ -2891,14 +3045,14 @@ public boolean getDone() { * * @return The token. 
*/ - @Override - public String getToken() { - Object ref = token_; - if (ref instanceof String) { - return (String) ref; + @java.lang.Override + public java.lang.String getToken() { + java.lang.Object ref = token_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); + java.lang.String s = bs.toStringUtf8(); token_ = s; return s; } @@ -2915,12 +3069,12 @@ public String getToken() { * * @return The bytes for token. */ - @Override + @java.lang.Override public com.google.protobuf.ByteString getTokenBytes() { - Object ref = token_; - if (ref instanceof String) { + java.lang.Object ref = token_; + if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); token_ = b; return b; } else { @@ -2943,7 +3097,7 @@ public com.google.protobuf.ByteString getTokenBytes() { * * @return Whether the lowWatermark field is set. */ - @Override + @java.lang.Override public boolean hasLowWatermark() { return lowWatermark_ != null; } @@ -2960,7 +3114,7 @@ public boolean hasLowWatermark() { * * @return The lowWatermark. */ - @Override + @java.lang.Override public com.google.protobuf.Timestamp getLowWatermark() { return lowWatermark_ == null ? 
com.google.protobuf.Timestamp.getDefaultInstance() @@ -2977,14 +3131,14 @@ public com.google.protobuf.Timestamp getLowWatermark() { * * .google.protobuf.Timestamp low_watermark = 10; */ - @Override + @java.lang.Override public com.google.protobuf.TimestampOrBuilder getLowWatermarkOrBuilder() { return getLowWatermark(); } private byte memoizedIsInitialized = -1; - @Override + @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; @@ -2994,9 +3148,11 @@ public final boolean isInitialized() { return true; } - @Override + @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (type_ != Type.TYPE_UNSPECIFIED.getNumber()) { + if (type_ + != com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type.TYPE_UNSPECIFIED + .getNumber()) { output.writeEnum(1, type_); } if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(sourceClusterId_)) { @@ -3026,13 +3182,15 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io unknownFields.writeTo(output); } - @Override + @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; - if (type_ != Type.TYPE_UNSPECIFIED.getNumber()) { + if (type_ + != com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type.TYPE_UNSPECIFIED + .getNumber()) { size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, type_); } if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(sourceClusterId_)) { @@ -3064,15 +3222,16 @@ public int getSerializedSize() { return size; } - @Override - public boolean equals(final Object obj) { + @java.lang.Override + public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof DataChange)) { + if (!(obj instanceof com.google.bigtable.v2.ReadChangeStreamResponse.DataChange)) { return super.equals(obj); } - DataChange 
other = (DataChange) obj; + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange other = + (com.google.bigtable.v2.ReadChangeStreamResponse.DataChange) obj; if (type_ != other.type_) return false; if (!getSourceClusterId().equals(other.getSourceClusterId())) return false; @@ -3093,7 +3252,7 @@ public boolean equals(final Object obj) { return true; } - @Override + @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; @@ -3129,69 +3288,71 @@ public int hashCode() { return hash; } - public static DataChange parseFrom(java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.DataChange parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static DataChange parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.DataChange parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static DataChange parseFrom(com.google.protobuf.ByteString data) + public static com.google.bigtable.v2.ReadChangeStreamResponse.DataChange parseFrom( + com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static DataChange parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.DataChange parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static DataChange parseFrom(byte[] data) + public static com.google.bigtable.v2.ReadChangeStreamResponse.DataChange parseFrom(byte[] data) throws 
com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static DataChange parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.DataChange parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static DataChange parseFrom(java.io.InputStream input) throws java.io.IOException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.DataChange parseFrom( + java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } - public static DataChange parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.DataChange parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } - public static DataChange parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.DataChange parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); } - public static DataChange parseDelimitedFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.DataChange parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( PARSER, input, extensionRegistry); } - public static DataChange parseFrom(com.google.protobuf.CodedInputStream input) - throws java.io.IOException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.DataChange parseFrom( + 
com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } - public static DataChange parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.DataChange parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -3199,7 +3360,7 @@ public static DataChange parseFrom( PARSER, input, extensionRegistry); } - @Override + @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } @@ -3208,17 +3369,19 @@ public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - public static Builder newBuilder(DataChange prototype) { + public static Builder newBuilder( + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } - @Override + @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } - @Override - protected Builder newBuilderForType(BuilderParent parent) { + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -3240,17 +3403,20 @@ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:google.bigtable.v2.ReadChangeStreamResponse.DataChange) - DataChangeOrBuilder { + com.google.bigtable.v2.ReadChangeStreamResponse.DataChangeOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return BigtableProto + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_DataChange_descriptor; } - @Override - protected FieldAccessorTable internalGetFieldAccessorTable() { - return BigtableProto + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_DataChange_fieldAccessorTable - .ensureFieldAccessorsInitialized(DataChange.class, Builder.class); + .ensureFieldAccessorsInitialized( + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.class, + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Builder.class); } // Construct using com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.newBuilder() @@ -3258,7 +3424,7 @@ private Builder() { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -3269,7 +3435,7 @@ private void maybeForceBuilderInitialization() { } } - @Override + @java.lang.Override public Builder clear() { super.clear(); type_ = 0; @@ -3305,29 
+3471,31 @@ public Builder clear() { return this; } - @Override + @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return BigtableProto + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_DataChange_descriptor; } - @Override - public DataChange getDefaultInstanceForType() { - return DataChange.getDefaultInstance(); + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.DataChange + getDefaultInstanceForType() { + return com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.getDefaultInstance(); } - @Override - public DataChange build() { - DataChange result = buildPartial(); + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.DataChange build() { + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - @Override - public DataChange buildPartial() { - DataChange result = new DataChange(this); + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.DataChange buildPartial() { + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange result = + new com.google.bigtable.v2.ReadChangeStreamResponse.DataChange(this); int from_bitField0_ = bitField0_; result.type_ = type_; result.sourceClusterId_ = sourceClusterId_; @@ -3358,50 +3526,55 @@ public DataChange buildPartial() { return result; } - @Override + @java.lang.Override public Builder clone() { return super.clone(); } - @Override - public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } - @Override + @java.lang.Override public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { 
return super.clearField(field); } - @Override + @java.lang.Override public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } - @Override + @java.lang.Override public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { return super.setRepeatedField(field, index, value); } - @Override + @java.lang.Override public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } - @Override + @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof DataChange) { - return mergeFrom((DataChange) other); + if (other instanceof com.google.bigtable.v2.ReadChangeStreamResponse.DataChange) { + return mergeFrom((com.google.bigtable.v2.ReadChangeStreamResponse.DataChange) other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(DataChange other) { - if (other == DataChange.getDefaultInstance()) return this; + public Builder mergeFrom(com.google.bigtable.v2.ReadChangeStreamResponse.DataChange other) { + if (other + == com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.getDefaultInstance()) + return this; if (other.type_ != 0) { setTypeValue(other.getTypeValue()); } @@ -3460,21 +3633,22 @@ public Builder mergeFrom(DataChange other) { return this; } - @Override + @java.lang.Override public final boolean isInitialized() { return true; } - @Override + @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - DataChange parsedMessage = null; + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange 
parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (DataChange) e.getUnfinishedMessage(); + parsedMessage = + (com.google.bigtable.v2.ReadChangeStreamResponse.DataChange) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -3498,7 +3672,7 @@ public Builder mergeFrom( * * @return The enum numeric value on the wire for type. */ - @Override + @java.lang.Override public int getTypeValue() { return type_; } @@ -3531,11 +3705,14 @@ public Builder setTypeValue(int value) { * * @return The type. */ - @Override - public Type getType() { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type getType() { @SuppressWarnings("deprecation") - Type result = Type.valueOf(type_); - return result == null ? Type.UNRECOGNIZED : result; + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type result = + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type.valueOf(type_); + return result == null + ? com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type.UNRECOGNIZED + : result; } /** * @@ -3549,7 +3726,8 @@ public Type getType() { * @param value The type to set. * @return This builder for chaining. */ - public Builder setType(Type value) { + public Builder setType( + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type value) { if (value == null) { throw new NullPointerException(); } @@ -3576,7 +3754,7 @@ public Builder clearType() { return this; } - private Object sourceClusterId_ = ""; + private java.lang.Object sourceClusterId_ = ""; /** * * @@ -3589,15 +3767,15 @@ public Builder clearType() { * * @return The sourceClusterId. 
*/ - public String getSourceClusterId() { - Object ref = sourceClusterId_; - if (!(ref instanceof String)) { + public java.lang.String getSourceClusterId() { + java.lang.Object ref = sourceClusterId_; + if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); + java.lang.String s = bs.toStringUtf8(); sourceClusterId_ = s; return s; } else { - return (String) ref; + return (java.lang.String) ref; } } /** @@ -3613,10 +3791,10 @@ public String getSourceClusterId() { * @return The bytes for sourceClusterId. */ public com.google.protobuf.ByteString getSourceClusterIdBytes() { - Object ref = sourceClusterId_; + java.lang.Object ref = sourceClusterId_; if (ref instanceof String) { com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); sourceClusterId_ = b; return b; } else { @@ -3636,7 +3814,7 @@ public com.google.protobuf.ByteString getSourceClusterIdBytes() { * @param value The sourceClusterId to set. * @return This builder for chaining. */ - public Builder setSourceClusterId(String value) { + public Builder setSourceClusterId(java.lang.String value) { if (value == null) { throw new NullPointerException(); } @@ -3701,7 +3879,7 @@ public Builder setSourceClusterIdBytes(com.google.protobuf.ByteString value) { * * @return The rowKey. */ - @Override + @java.lang.Override public com.google.protobuf.ByteString getRowKey() { return rowKey_; } @@ -3951,7 +4129,7 @@ public com.google.protobuf.TimestampOrBuilder getCommitTimestampOrBuilder() { * * @return The tiebreaker. 
*/ - @Override + @java.lang.Override public int getTiebreaker() { return tiebreaker_; } @@ -4003,17 +4181,22 @@ public Builder clearTiebreaker() { return this; } - private java.util.List chunks_ = java.util.Collections.emptyList(); + private java.util.List + chunks_ = java.util.Collections.emptyList(); private void ensureChunksIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { - chunks_ = new java.util.ArrayList(chunks_); + chunks_ = + new java.util.ArrayList< + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk>(chunks_); bitField0_ |= 0x00000001; } } private com.google.protobuf.RepeatedFieldBuilderV3< - MutationChunk, MutationChunk.Builder, MutationChunkOrBuilder> + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk, + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.Builder, + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunkOrBuilder> chunksBuilder_; /** @@ -4028,7 +4211,8 @@ private void ensureChunksIsMutable() { * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; * */ - public java.util.List getChunksList() { + public java.util.List + getChunksList() { if (chunksBuilder_ == null) { return java.util.Collections.unmodifiableList(chunks_); } else { @@ -4066,7 +4250,7 @@ public int getChunksCount() { * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; * */ - public MutationChunk getChunks(int index) { + public com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk getChunks(int index) { if (chunksBuilder_ == null) { return chunks_.get(index); } else { @@ -4085,7 +4269,8 @@ public MutationChunk getChunks(int index) { * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; * */ - public Builder setChunks(int index, MutationChunk value) { + public Builder setChunks( + int index, com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk value) { if (chunksBuilder_ == null) { if (value == null) { throw new 
NullPointerException(); @@ -4110,7 +4295,9 @@ public Builder setChunks(int index, MutationChunk value) { * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; * */ - public Builder setChunks(int index, MutationChunk.Builder builderForValue) { + public Builder setChunks( + int index, + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.Builder builderForValue) { if (chunksBuilder_ == null) { ensureChunksIsMutable(); chunks_.set(index, builderForValue.build()); @@ -4132,7 +4319,8 @@ public Builder setChunks(int index, MutationChunk.Builder builderForValue) { * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; * */ - public Builder addChunks(MutationChunk value) { + public Builder addChunks( + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk value) { if (chunksBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -4157,7 +4345,8 @@ public Builder addChunks(MutationChunk value) { * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; * */ - public Builder addChunks(int index, MutationChunk value) { + public Builder addChunks( + int index, com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk value) { if (chunksBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -4182,7 +4371,8 @@ public Builder addChunks(int index, MutationChunk value) { * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; * */ - public Builder addChunks(MutationChunk.Builder builderForValue) { + public Builder addChunks( + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.Builder builderForValue) { if (chunksBuilder_ == null) { ensureChunksIsMutable(); chunks_.add(builderForValue.build()); @@ -4204,7 +4394,9 @@ public Builder addChunks(MutationChunk.Builder builderForValue) { * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; * */ - public Builder addChunks(int 
index, MutationChunk.Builder builderForValue) { + public Builder addChunks( + int index, + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.Builder builderForValue) { if (chunksBuilder_ == null) { ensureChunksIsMutable(); chunks_.add(index, builderForValue.build()); @@ -4226,7 +4418,10 @@ public Builder addChunks(int index, MutationChunk.Builder builderForValue) { * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; * */ - public Builder addAllChunks(Iterable values) { + public Builder addAllChunks( + java.lang.Iterable< + ? extends com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk> + values) { if (chunksBuilder_ == null) { ensureChunksIsMutable(); com.google.protobuf.AbstractMessageLite.Builder.addAll(values, chunks_); @@ -4292,7 +4487,8 @@ public Builder removeChunks(int index) { * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; * */ - public MutationChunk.Builder getChunksBuilder(int index) { + public com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.Builder getChunksBuilder( + int index) { return getChunksFieldBuilder().getBuilder(index); } /** @@ -4307,7 +4503,8 @@ public MutationChunk.Builder getChunksBuilder(int index) { * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; * */ - public MutationChunkOrBuilder getChunksOrBuilder(int index) { + public com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunkOrBuilder + getChunksOrBuilder(int index) { if (chunksBuilder_ == null) { return chunks_.get(index); } else { @@ -4326,7 +4523,9 @@ public MutationChunkOrBuilder getChunksOrBuilder(int index) { * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; * */ - public java.util.List getChunksOrBuilderList() { + public java.util.List< + ? 
extends com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunkOrBuilder> + getChunksOrBuilderList() { if (chunksBuilder_ != null) { return chunksBuilder_.getMessageOrBuilderList(); } else { @@ -4345,8 +4544,11 @@ public java.util.List getChunksOrBuilderList() * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; * */ - public MutationChunk.Builder addChunksBuilder() { - return getChunksFieldBuilder().addBuilder(MutationChunk.getDefaultInstance()); + public com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.Builder + addChunksBuilder() { + return getChunksFieldBuilder() + .addBuilder( + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.getDefaultInstance()); } /** * @@ -4360,8 +4562,12 @@ public MutationChunk.Builder addChunksBuilder() { * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; * */ - public MutationChunk.Builder addChunksBuilder(int index) { - return getChunksFieldBuilder().addBuilder(index, MutationChunk.getDefaultInstance()); + public com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.Builder addChunksBuilder( + int index) { + return getChunksFieldBuilder() + .addBuilder( + index, + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.getDefaultInstance()); } /** * @@ -4375,17 +4581,22 @@ public MutationChunk.Builder addChunksBuilder(int index) { * repeated .google.bigtable.v2.ReadChangeStreamResponse.MutationChunk chunks = 6; * */ - public java.util.List getChunksBuilderList() { + public java.util.List + getChunksBuilderList() { return getChunksFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilderV3< - MutationChunk, MutationChunk.Builder, MutationChunkOrBuilder> + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk, + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.Builder, + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunkOrBuilder> getChunksFieldBuilder() { if (chunksBuilder_ 
== null) { chunksBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< - MutationChunk, MutationChunk.Builder, MutationChunkOrBuilder>( + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk, + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.Builder, + com.google.bigtable.v2.ReadChangeStreamResponse.MutationChunkOrBuilder>( chunks_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); chunks_ = null; } @@ -4405,7 +4616,7 @@ public java.util.List getChunksBuilderList() { * * @return The done. */ - @Override + @java.lang.Override public boolean getDone() { return done_; } @@ -4447,7 +4658,7 @@ public Builder clearDone() { return this; } - private Object token_ = ""; + private java.lang.Object token_ = ""; /** * * @@ -4460,15 +4671,15 @@ public Builder clearDone() { * * @return The token. */ - public String getToken() { - Object ref = token_; - if (!(ref instanceof String)) { + public java.lang.String getToken() { + java.lang.Object ref = token_; + if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); + java.lang.String s = bs.toStringUtf8(); token_ = s; return s; } else { - return (String) ref; + return (java.lang.String) ref; } } /** @@ -4484,10 +4695,10 @@ public String getToken() { * @return The bytes for token. */ public com.google.protobuf.ByteString getTokenBytes() { - Object ref = token_; + java.lang.Object ref = token_; if (ref instanceof String) { com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); token_ = b; return b; } else { @@ -4507,7 +4718,7 @@ public com.google.protobuf.ByteString getTokenBytes() { * @param value The token to set. * @return This builder for chaining. 
*/ - public Builder setToken(String value) { + public Builder setToken(java.lang.String value) { if (value == null) { throw new NullPointerException(); } @@ -4761,13 +4972,13 @@ public com.google.protobuf.TimestampOrBuilder getLowWatermarkOrBuilder() { return lowWatermarkBuilder_; } - @Override + @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } - @Override + @java.lang.Override public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); @@ -4777,19 +4988,20 @@ public final Builder mergeUnknownFields( } // @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadChangeStreamResponse.DataChange) - private static final DataChange DEFAULT_INSTANCE; + private static final com.google.bigtable.v2.ReadChangeStreamResponse.DataChange + DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new DataChange(); + DEFAULT_INSTANCE = new com.google.bigtable.v2.ReadChangeStreamResponse.DataChange(); } - public static DataChange getDefaultInstance() { + public static com.google.bigtable.v2.ReadChangeStreamResponse.DataChange getDefaultInstance() { return DEFAULT_INSTANCE; } private static final com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { - @Override + @java.lang.Override public DataChange parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -4802,13 +5014,13 @@ public static com.google.protobuf.Parser parser() { return PARSER; } - @Override + @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } - @Override - public DataChange getDefaultInstanceForType() { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.DataChange getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } @@ -4843,7 +5055,7 @@ public interface 
HeartbeatOrBuilder * * @return The continuationToken. */ - StreamContinuationToken getContinuationToken(); + com.google.bigtable.v2.StreamContinuationToken getContinuationToken(); /** * * @@ -4854,7 +5066,7 @@ public interface HeartbeatOrBuilder * * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; */ - StreamContinuationTokenOrBuilder getContinuationTokenOrBuilder(); + com.google.bigtable.v2.StreamContinuationTokenOrBuilder getContinuationTokenOrBuilder(); /** * @@ -4919,13 +5131,13 @@ private Heartbeat(com.google.protobuf.GeneratedMessageV3.Builder builder) { private Heartbeat() {} - @Override + @java.lang.Override @SuppressWarnings({"unused"}) - protected Object newInstance(UnusedPrivateParameter unused) { + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { return new Heartbeat(); } - @Override + @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } @@ -4936,7 +5148,7 @@ private Heartbeat( throws com.google.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { - throw new NullPointerException(); + throw new java.lang.NullPointerException(); } com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -4950,12 +5162,13 @@ private Heartbeat( break; case 10: { - StreamContinuationToken.Builder subBuilder = null; + com.google.bigtable.v2.StreamContinuationToken.Builder subBuilder = null; if (continuationToken_ != null) { subBuilder = continuationToken_.toBuilder(); } continuationToken_ = - input.readMessage(StreamContinuationToken.parser(), extensionRegistry); + input.readMessage( + com.google.bigtable.v2.StreamContinuationToken.parser(), extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(continuationToken_); continuationToken_ = subBuilder.buildPartial(); @@ -5000,19 +5213,22 @@ private Heartbeat( } public static final com.google.protobuf.Descriptors.Descriptor 
getDescriptor() { - return BigtableProto + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_Heartbeat_descriptor; } - @Override - protected FieldAccessorTable internalGetFieldAccessorTable() { - return BigtableProto + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_Heartbeat_fieldAccessorTable - .ensureFieldAccessorsInitialized(Heartbeat.class, Builder.class); + .ensureFieldAccessorsInitialized( + com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.class, + com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.Builder.class); } public static final int CONTINUATION_TOKEN_FIELD_NUMBER = 1; - private StreamContinuationToken continuationToken_; + private com.google.bigtable.v2.StreamContinuationToken continuationToken_; /** * * @@ -5025,7 +5241,7 @@ protected FieldAccessorTable internalGetFieldAccessorTable() { * * @return Whether the continuationToken field is set. */ - @Override + @java.lang.Override public boolean hasContinuationToken() { return continuationToken_ != null; } @@ -5041,10 +5257,10 @@ public boolean hasContinuationToken() { * * @return The continuationToken. */ - @Override - public StreamContinuationToken getContinuationToken() { + @java.lang.Override + public com.google.bigtable.v2.StreamContinuationToken getContinuationToken() { return continuationToken_ == null - ? StreamContinuationToken.getDefaultInstance() + ? 
com.google.bigtable.v2.StreamContinuationToken.getDefaultInstance() : continuationToken_; } /** @@ -5057,8 +5273,8 @@ public StreamContinuationToken getContinuationToken() { * * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; */ - @Override - public StreamContinuationTokenOrBuilder getContinuationTokenOrBuilder() { + @java.lang.Override + public com.google.bigtable.v2.StreamContinuationTokenOrBuilder getContinuationTokenOrBuilder() { return getContinuationToken(); } @@ -5077,7 +5293,7 @@ public StreamContinuationTokenOrBuilder getContinuationTokenOrBuilder() { * * @return Whether the lowWatermark field is set. */ - @Override + @java.lang.Override public boolean hasLowWatermark() { return lowWatermark_ != null; } @@ -5094,7 +5310,7 @@ public boolean hasLowWatermark() { * * @return The lowWatermark. */ - @Override + @java.lang.Override public com.google.protobuf.Timestamp getLowWatermark() { return lowWatermark_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() @@ -5111,14 +5327,14 @@ public com.google.protobuf.Timestamp getLowWatermark() { * * .google.protobuf.Timestamp low_watermark = 2; */ - @Override + @java.lang.Override public com.google.protobuf.TimestampOrBuilder getLowWatermarkOrBuilder() { return getLowWatermark(); } private byte memoizedIsInitialized = -1; - @Override + @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; @@ -5128,7 +5344,7 @@ public final boolean isInitialized() { return true; } - @Override + @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (continuationToken_ != null) { output.writeMessage(1, getContinuationToken()); @@ -5139,7 +5355,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io unknownFields.writeTo(output); } - @Override + @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != 
-1) return size; @@ -5156,15 +5372,16 @@ public int getSerializedSize() { return size; } - @Override - public boolean equals(final Object obj) { + @java.lang.Override + public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof Heartbeat)) { + if (!(obj instanceof com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat)) { return super.equals(obj); } - Heartbeat other = (Heartbeat) obj; + com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat other = + (com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat) obj; if (hasContinuationToken() != other.hasContinuationToken()) return false; if (hasContinuationToken()) { @@ -5178,7 +5395,7 @@ public boolean equals(final Object obj) { return true; } - @Override + @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; @@ -5198,69 +5415,71 @@ public int hashCode() { return hash; } - public static Heartbeat parseFrom(java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static Heartbeat parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static Heartbeat parseFrom(com.google.protobuf.ByteString data) + public static com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat parseFrom( + com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static Heartbeat parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat parseFrom( 
com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static Heartbeat parseFrom(byte[] data) + public static com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static Heartbeat parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static Heartbeat parseFrom(java.io.InputStream input) throws java.io.IOException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat parseFrom( + java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } - public static Heartbeat parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } - public static Heartbeat parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); } - public static Heartbeat parseDelimitedFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) 
throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( PARSER, input, extensionRegistry); } - public static Heartbeat parseFrom(com.google.protobuf.CodedInputStream input) - throws java.io.IOException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } - public static Heartbeat parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -5268,7 +5487,7 @@ public static Heartbeat parseFrom( PARSER, input, extensionRegistry); } - @Override + @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } @@ -5277,17 +5496,19 @@ public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - public static Builder newBuilder(Heartbeat prototype) { + public static Builder newBuilder( + com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } - @Override + @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } - @Override - protected Builder newBuilderForType(BuilderParent parent) { + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -5305,17 +5526,20 @@ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:google.bigtable.v2.ReadChangeStreamResponse.Heartbeat) - HeartbeatOrBuilder { + com.google.bigtable.v2.ReadChangeStreamResponse.HeartbeatOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return BigtableProto + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_Heartbeat_descriptor; } - @Override - protected FieldAccessorTable internalGetFieldAccessorTable() { - return BigtableProto + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_Heartbeat_fieldAccessorTable - .ensureFieldAccessorsInitialized(Heartbeat.class, Builder.class); + .ensureFieldAccessorsInitialized( + com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.class, + com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.Builder.class); } // Construct using com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.newBuilder() @@ -5323,7 +5547,7 @@ private Builder() { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -5332,7 +5556,7 @@ private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} } - @Override + @java.lang.Override 
public Builder clear() { super.clear(); if (continuationTokenBuilder_ == null) { @@ -5350,29 +5574,30 @@ public Builder clear() { return this; } - @Override + @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return BigtableProto + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_Heartbeat_descriptor; } - @Override - public Heartbeat getDefaultInstanceForType() { - return Heartbeat.getDefaultInstance(); + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat getDefaultInstanceForType() { + return com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.getDefaultInstance(); } - @Override - public Heartbeat build() { - Heartbeat result = buildPartial(); + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat build() { + com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - @Override - public Heartbeat buildPartial() { - Heartbeat result = new Heartbeat(this); + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat buildPartial() { + com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat result = + new com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat(this); if (continuationTokenBuilder_ == null) { result.continuationToken_ = continuationToken_; } else { @@ -5387,50 +5612,54 @@ public Heartbeat buildPartial() { return result; } - @Override + @java.lang.Override public Builder clone() { return super.clone(); } - @Override - public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } - @Override + @java.lang.Override public 
Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } - @Override + @java.lang.Override public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } - @Override + @java.lang.Override public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { return super.setRepeatedField(field, index, value); } - @Override + @java.lang.Override public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } - @Override + @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof Heartbeat) { - return mergeFrom((Heartbeat) other); + if (other instanceof com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat) { + return mergeFrom((com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat) other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(Heartbeat other) { - if (other == Heartbeat.getDefaultInstance()) return this; + public Builder mergeFrom(com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat other) { + if (other == com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.getDefaultInstance()) + return this; if (other.hasContinuationToken()) { mergeContinuationToken(other.getContinuationToken()); } @@ -5442,21 +5671,22 @@ public Builder mergeFrom(Heartbeat other) { return this; } - @Override + @java.lang.Override public final boolean isInitialized() { return true; } - @Override + @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - 
Heartbeat parsedMessage = null; + com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (Heartbeat) e.getUnfinishedMessage(); + parsedMessage = + (com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -5466,11 +5696,11 @@ public Builder mergeFrom( return this; } - private StreamContinuationToken continuationToken_; + private com.google.bigtable.v2.StreamContinuationToken continuationToken_; private com.google.protobuf.SingleFieldBuilderV3< - StreamContinuationToken, - StreamContinuationToken.Builder, - StreamContinuationTokenOrBuilder> + com.google.bigtable.v2.StreamContinuationToken, + com.google.bigtable.v2.StreamContinuationToken.Builder, + com.google.bigtable.v2.StreamContinuationTokenOrBuilder> continuationTokenBuilder_; /** * @@ -5499,10 +5729,10 @@ public boolean hasContinuationToken() { * * @return The continuationToken. */ - public StreamContinuationToken getContinuationToken() { + public com.google.bigtable.v2.StreamContinuationToken getContinuationToken() { if (continuationTokenBuilder_ == null) { return continuationToken_ == null - ? StreamContinuationToken.getDefaultInstance() + ? 
com.google.bigtable.v2.StreamContinuationToken.getDefaultInstance() : continuationToken_; } else { return continuationTokenBuilder_.getMessage(); @@ -5518,7 +5748,7 @@ public StreamContinuationToken getContinuationToken() { * * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; */ - public Builder setContinuationToken(StreamContinuationToken value) { + public Builder setContinuationToken(com.google.bigtable.v2.StreamContinuationToken value) { if (continuationTokenBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -5541,7 +5771,8 @@ public Builder setContinuationToken(StreamContinuationToken value) { * * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; */ - public Builder setContinuationToken(StreamContinuationToken.Builder builderForValue) { + public Builder setContinuationToken( + com.google.bigtable.v2.StreamContinuationToken.Builder builderForValue) { if (continuationTokenBuilder_ == null) { continuationToken_ = builderForValue.build(); onChanged(); @@ -5561,11 +5792,11 @@ public Builder setContinuationToken(StreamContinuationToken.Builder builderForVa * * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; */ - public Builder mergeContinuationToken(StreamContinuationToken value) { + public Builder mergeContinuationToken(com.google.bigtable.v2.StreamContinuationToken value) { if (continuationTokenBuilder_ == null) { if (continuationToken_ != null) { continuationToken_ = - StreamContinuationToken.newBuilder(continuationToken_) + com.google.bigtable.v2.StreamContinuationToken.newBuilder(continuationToken_) .mergeFrom(value) .buildPartial(); } else { @@ -5609,7 +5840,7 @@ public Builder clearContinuationToken() { * * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; */ - public StreamContinuationToken.Builder getContinuationTokenBuilder() { + public com.google.bigtable.v2.StreamContinuationToken.Builder getContinuationTokenBuilder() { onChanged(); return 
getContinuationTokenFieldBuilder().getBuilder(); @@ -5624,12 +5855,13 @@ public StreamContinuationToken.Builder getContinuationTokenBuilder() { * * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; */ - public StreamContinuationTokenOrBuilder getContinuationTokenOrBuilder() { + public com.google.bigtable.v2.StreamContinuationTokenOrBuilder + getContinuationTokenOrBuilder() { if (continuationTokenBuilder_ != null) { return continuationTokenBuilder_.getMessageOrBuilder(); } else { return continuationToken_ == null - ? StreamContinuationToken.getDefaultInstance() + ? com.google.bigtable.v2.StreamContinuationToken.getDefaultInstance() : continuationToken_; } } @@ -5644,16 +5876,16 @@ public StreamContinuationTokenOrBuilder getContinuationTokenOrBuilder() { * .google.bigtable.v2.StreamContinuationToken continuation_token = 1; */ private com.google.protobuf.SingleFieldBuilderV3< - StreamContinuationToken, - StreamContinuationToken.Builder, - StreamContinuationTokenOrBuilder> + com.google.bigtable.v2.StreamContinuationToken, + com.google.bigtable.v2.StreamContinuationToken.Builder, + com.google.bigtable.v2.StreamContinuationTokenOrBuilder> getContinuationTokenFieldBuilder() { if (continuationTokenBuilder_ == null) { continuationTokenBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< - StreamContinuationToken, - StreamContinuationToken.Builder, - StreamContinuationTokenOrBuilder>( + com.google.bigtable.v2.StreamContinuationToken, + com.google.bigtable.v2.StreamContinuationToken.Builder, + com.google.bigtable.v2.StreamContinuationTokenOrBuilder>( getContinuationToken(), getParentForChildren(), isClean()); continuationToken_ = null; } @@ -5863,13 +6095,13 @@ public com.google.protobuf.TimestampOrBuilder getLowWatermarkOrBuilder() { return lowWatermarkBuilder_; } - @Override + @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } - @Override 
+ @java.lang.Override public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); @@ -5879,19 +6111,19 @@ public final Builder mergeUnknownFields( } // @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadChangeStreamResponse.Heartbeat) - private static final Heartbeat DEFAULT_INSTANCE; + private static final com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new Heartbeat(); + DEFAULT_INSTANCE = new com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat(); } - public static Heartbeat getDefaultInstance() { + public static com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat getDefaultInstance() { return DEFAULT_INSTANCE; } private static final com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { - @Override + @java.lang.Override public Heartbeat parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -5904,13 +6136,13 @@ public static com.google.protobuf.Parser parser() { return PARSER; } - @Override + @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } - @Override - public Heartbeat getDefaultInstanceForType() { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } @@ -5965,7 +6197,7 @@ public interface CloseStreamOrBuilder * * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ - java.util.List getContinuationTokensList(); + java.util.List getContinuationTokensList(); /** * * @@ -5976,7 +6208,7 @@ public interface CloseStreamOrBuilder * * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ - StreamContinuationToken getContinuationTokens(int index); + com.google.bigtable.v2.StreamContinuationToken getContinuationTokens(int 
index); /** * * @@ -5998,7 +6230,8 @@ public interface CloseStreamOrBuilder * * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ - java.util.List getContinuationTokensOrBuilderList(); + java.util.List + getContinuationTokensOrBuilderList(); /** * * @@ -6009,7 +6242,8 @@ public interface CloseStreamOrBuilder * * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ - StreamContinuationTokenOrBuilder getContinuationTokensOrBuilder(int index); + com.google.bigtable.v2.StreamContinuationTokenOrBuilder getContinuationTokensOrBuilder( + int index); } /** * @@ -6038,13 +6272,13 @@ private CloseStream() { continuationTokens_ = java.util.Collections.emptyList(); } - @Override + @java.lang.Override @SuppressWarnings({"unused"}) - protected Object newInstance(UnusedPrivateParameter unused) { + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { return new CloseStream(); } - @Override + @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } @@ -6055,7 +6289,7 @@ private CloseStream( throws com.google.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { - throw new NullPointerException(); + throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = @@ -6085,11 +6319,14 @@ private CloseStream( case 18: { if (!((mutable_bitField0_ & 0x00000001) != 0)) { - continuationTokens_ = new java.util.ArrayList(); + continuationTokens_ = + new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } continuationTokens_.add( - input.readMessage(StreamContinuationToken.parser(), extensionRegistry)); + input.readMessage( + com.google.bigtable.v2.StreamContinuationToken.parser(), + extensionRegistry)); break; } default: @@ -6117,15 +6354,18 @@ private CloseStream( } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - 
return BigtableProto + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_CloseStream_descriptor; } - @Override - protected FieldAccessorTable internalGetFieldAccessorTable() { - return BigtableProto + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_CloseStream_fieldAccessorTable - .ensureFieldAccessorsInitialized(CloseStream.class, Builder.class); + .ensureFieldAccessorsInitialized( + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream.class, + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream.Builder.class); } public static final int STATUS_FIELD_NUMBER = 1; @@ -6141,7 +6381,7 @@ protected FieldAccessorTable internalGetFieldAccessorTable() { * * @return Whether the status field is set. */ - @Override + @java.lang.Override public boolean hasStatus() { return status_ != null; } @@ -6156,7 +6396,7 @@ public boolean hasStatus() { * * @return The status. */ - @Override + @java.lang.Override public com.google.rpc.Status getStatus() { return status_ == null ? 
com.google.rpc.Status.getDefaultInstance() : status_; } @@ -6169,13 +6409,13 @@ public com.google.rpc.Status getStatus() { * * .google.rpc.Status status = 1; */ - @Override + @java.lang.Override public com.google.rpc.StatusOrBuilder getStatusOrBuilder() { return getStatus(); } public static final int CONTINUATION_TOKENS_FIELD_NUMBER = 2; - private java.util.List continuationTokens_; + private java.util.List continuationTokens_; /** * * @@ -6186,8 +6426,9 @@ public com.google.rpc.StatusOrBuilder getStatusOrBuilder() { * * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ - @Override - public java.util.List getContinuationTokensList() { + @java.lang.Override + public java.util.List + getContinuationTokensList() { return continuationTokens_; } /** @@ -6200,8 +6441,8 @@ public java.util.List getContinuationTokensList() { * * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ - @Override - public java.util.List + @java.lang.Override + public java.util.List getContinuationTokensOrBuilderList() { return continuationTokens_; } @@ -6215,7 +6456,7 @@ public java.util.List getContinuationTokensList() { * * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ - @Override + @java.lang.Override public int getContinuationTokensCount() { return continuationTokens_.size(); } @@ -6229,8 +6470,8 @@ public int getContinuationTokensCount() { * * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ - @Override - public StreamContinuationToken getContinuationTokens(int index) { + @java.lang.Override + public com.google.bigtable.v2.StreamContinuationToken getContinuationTokens(int index) { return continuationTokens_.get(index); } /** @@ -6243,14 +6484,15 @@ public StreamContinuationToken getContinuationTokens(int index) { * * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ - @Override - public StreamContinuationTokenOrBuilder 
getContinuationTokensOrBuilder(int index) { + @java.lang.Override + public com.google.bigtable.v2.StreamContinuationTokenOrBuilder getContinuationTokensOrBuilder( + int index) { return continuationTokens_.get(index); } private byte memoizedIsInitialized = -1; - @Override + @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; @@ -6260,7 +6502,7 @@ public final boolean isInitialized() { return true; } - @Override + @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (status_ != null) { output.writeMessage(1, getStatus()); @@ -6271,7 +6513,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io unknownFields.writeTo(output); } - @Override + @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; @@ -6289,15 +6531,16 @@ public int getSerializedSize() { return size; } - @Override - public boolean equals(final Object obj) { + @java.lang.Override + public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof CloseStream)) { + if (!(obj instanceof com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream)) { return super.equals(obj); } - CloseStream other = (CloseStream) obj; + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream other = + (com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream) obj; if (hasStatus() != other.hasStatus()) return false; if (hasStatus()) { @@ -6308,7 +6551,7 @@ public boolean equals(final Object obj) { return true; } - @Override + @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; @@ -6328,69 +6571,71 @@ public int hashCode() { return hash; } - public static CloseStream parseFrom(java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { + public static 
com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static CloseStream parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static CloseStream parseFrom(com.google.protobuf.ByteString data) + public static com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream parseFrom( + com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static CloseStream parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static CloseStream parseFrom(byte[] data) + public static com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static CloseStream parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static CloseStream parseFrom(java.io.InputStream input) throws java.io.IOException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream parseFrom( + java.io.InputStream input) throws java.io.IOException { return 
com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } - public static CloseStream parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } - public static CloseStream parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); } - public static CloseStream parseDelimitedFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( PARSER, input, extensionRegistry); } - public static CloseStream parseFrom(com.google.protobuf.CodedInputStream input) - throws java.io.IOException { + public static com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } - public static CloseStream parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -6398,7 +6643,7 @@ public static CloseStream parseFrom( PARSER, input, extensionRegistry); } - @Override + @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } @@ -6407,17 +6652,19 @@ public 
static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - public static Builder newBuilder(CloseStream prototype) { + public static Builder newBuilder( + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } - @Override + @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } - @Override - protected Builder newBuilderForType(BuilderParent parent) { + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -6438,17 +6685,20 @@ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:google.bigtable.v2.ReadChangeStreamResponse.CloseStream) - CloseStreamOrBuilder { + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStreamOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return BigtableProto + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_CloseStream_descriptor; } - @Override - protected FieldAccessorTable internalGetFieldAccessorTable() { - return BigtableProto + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_CloseStream_fieldAccessorTable - .ensureFieldAccessorsInitialized(CloseStream.class, Builder.class); + .ensureFieldAccessorsInitialized( + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream.class, + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream.Builder.class); } // Construct using com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream.newBuilder() @@ -6456,7 +6706,7 
@@ private Builder() { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -6467,7 +6717,7 @@ private void maybeForceBuilderInitialization() { } } - @Override + @java.lang.Override public Builder clear() { super.clear(); if (statusBuilder_ == null) { @@ -6485,29 +6735,31 @@ public Builder clear() { return this; } - @Override + @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return BigtableProto + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_CloseStream_descriptor; } - @Override - public CloseStream getDefaultInstanceForType() { - return CloseStream.getDefaultInstance(); + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream + getDefaultInstanceForType() { + return com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream.getDefaultInstance(); } - @Override - public CloseStream build() { - CloseStream result = buildPartial(); + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream build() { + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - @Override - public CloseStream buildPartial() { - CloseStream result = new CloseStream(this); + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream buildPartial() { + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream result = + new com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream(this); int from_bitField0_ = bitField0_; if (statusBuilder_ == null) { result.status_ = status_; @@ -6527,50 +6779,55 @@ public CloseStream buildPartial() { return result; } - @Override + @java.lang.Override public Builder 
clone() { return super.clone(); } - @Override - public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } - @Override + @java.lang.Override public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } - @Override + @java.lang.Override public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } - @Override + @java.lang.Override public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { return super.setRepeatedField(field, index, value); } - @Override + @java.lang.Override public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } - @Override + @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof CloseStream) { - return mergeFrom((CloseStream) other); + if (other instanceof com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream) { + return mergeFrom((com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream) other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(CloseStream other) { - if (other == CloseStream.getDefaultInstance()) return this; + public Builder mergeFrom(com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream other) { + if (other + == com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream.getDefaultInstance()) + return this; if (other.hasStatus()) { mergeStatus(other.getStatus()); } @@ -6606,21 +6863,23 @@ public 
Builder mergeFrom(CloseStream other) { return this; } - @Override + @java.lang.Override public final boolean isInitialized() { return true; } - @Override + @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - CloseStream parsedMessage = null; + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (CloseStream) e.getUnfinishedMessage(); + parsedMessage = + (com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream) + e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -6805,21 +7064,22 @@ public com.google.rpc.StatusOrBuilder getStatusOrBuilder() { return statusBuilder_; } - private java.util.List continuationTokens_ = + private java.util.List continuationTokens_ = java.util.Collections.emptyList(); private void ensureContinuationTokensIsMutable() { if (!((bitField0_ & 0x00000001) != 0)) { continuationTokens_ = - new java.util.ArrayList(continuationTokens_); + new java.util.ArrayList( + continuationTokens_); bitField0_ |= 0x00000001; } } private com.google.protobuf.RepeatedFieldBuilderV3< - StreamContinuationToken, - StreamContinuationToken.Builder, - StreamContinuationTokenOrBuilder> + com.google.bigtable.v2.StreamContinuationToken, + com.google.bigtable.v2.StreamContinuationToken.Builder, + com.google.bigtable.v2.StreamContinuationTokenOrBuilder> continuationTokensBuilder_; /** @@ -6832,7 +7092,8 @@ private void ensureContinuationTokensIsMutable() { * * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ - public java.util.List getContinuationTokensList() { + public java.util.List + getContinuationTokensList() { if (continuationTokensBuilder_ == null) { return 
java.util.Collections.unmodifiableList(continuationTokens_); } else { @@ -6866,7 +7127,7 @@ public int getContinuationTokensCount() { * * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ - public StreamContinuationToken getContinuationTokens(int index) { + public com.google.bigtable.v2.StreamContinuationToken getContinuationTokens(int index) { if (continuationTokensBuilder_ == null) { return continuationTokens_.get(index); } else { @@ -6883,7 +7144,8 @@ public StreamContinuationToken getContinuationTokens(int index) { * * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ - public Builder setContinuationTokens(int index, StreamContinuationToken value) { + public Builder setContinuationTokens( + int index, com.google.bigtable.v2.StreamContinuationToken value) { if (continuationTokensBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -6907,7 +7169,7 @@ public Builder setContinuationTokens(int index, StreamContinuationToken value) { * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ public Builder setContinuationTokens( - int index, StreamContinuationToken.Builder builderForValue) { + int index, com.google.bigtable.v2.StreamContinuationToken.Builder builderForValue) { if (continuationTokensBuilder_ == null) { ensureContinuationTokensIsMutable(); continuationTokens_.set(index, builderForValue.build()); @@ -6927,7 +7189,7 @@ public Builder setContinuationTokens( * * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ - public Builder addContinuationTokens(StreamContinuationToken value) { + public Builder addContinuationTokens(com.google.bigtable.v2.StreamContinuationToken value) { if (continuationTokensBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -6950,7 +7212,8 @@ public Builder addContinuationTokens(StreamContinuationToken value) { * * repeated .google.bigtable.v2.StreamContinuationToken 
continuation_tokens = 2; */ - public Builder addContinuationTokens(int index, StreamContinuationToken value) { + public Builder addContinuationTokens( + int index, com.google.bigtable.v2.StreamContinuationToken value) { if (continuationTokensBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -6973,7 +7236,8 @@ public Builder addContinuationTokens(int index, StreamContinuationToken value) { * * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ - public Builder addContinuationTokens(StreamContinuationToken.Builder builderForValue) { + public Builder addContinuationTokens( + com.google.bigtable.v2.StreamContinuationToken.Builder builderForValue) { if (continuationTokensBuilder_ == null) { ensureContinuationTokensIsMutable(); continuationTokens_.add(builderForValue.build()); @@ -6994,7 +7258,7 @@ public Builder addContinuationTokens(StreamContinuationToken.Builder builderForV * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ public Builder addContinuationTokens( - int index, StreamContinuationToken.Builder builderForValue) { + int index, com.google.bigtable.v2.StreamContinuationToken.Builder builderForValue) { if (continuationTokensBuilder_ == null) { ensureContinuationTokensIsMutable(); continuationTokens_.add(index, builderForValue.build()); @@ -7014,7 +7278,8 @@ public Builder addContinuationTokens( * * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ - public Builder addAllContinuationTokens(Iterable values) { + public Builder addAllContinuationTokens( + java.lang.Iterable values) { if (continuationTokensBuilder_ == null) { ensureContinuationTokensIsMutable(); com.google.protobuf.AbstractMessageLite.Builder.addAll(values, continuationTokens_); @@ -7074,7 +7339,8 @@ public Builder removeContinuationTokens(int index) { * * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ - public StreamContinuationToken.Builder 
getContinuationTokensBuilder(int index) { + public com.google.bigtable.v2.StreamContinuationToken.Builder getContinuationTokensBuilder( + int index) { return getContinuationTokensFieldBuilder().getBuilder(index); } /** @@ -7087,7 +7353,8 @@ public StreamContinuationToken.Builder getContinuationTokensBuilder(int index) { * * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ - public StreamContinuationTokenOrBuilder getContinuationTokensOrBuilder(int index) { + public com.google.bigtable.v2.StreamContinuationTokenOrBuilder getContinuationTokensOrBuilder( + int index) { if (continuationTokensBuilder_ == null) { return continuationTokens_.get(index); } else { @@ -7104,7 +7371,7 @@ public StreamContinuationTokenOrBuilder getContinuationTokensOrBuilder(int index * * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ - public java.util.List + public java.util.List getContinuationTokensOrBuilderList() { if (continuationTokensBuilder_ != null) { return continuationTokensBuilder_.getMessageOrBuilderList(); @@ -7122,9 +7389,9 @@ public StreamContinuationTokenOrBuilder getContinuationTokensOrBuilder(int index * * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ - public StreamContinuationToken.Builder addContinuationTokensBuilder() { + public com.google.bigtable.v2.StreamContinuationToken.Builder addContinuationTokensBuilder() { return getContinuationTokensFieldBuilder() - .addBuilder(StreamContinuationToken.getDefaultInstance()); + .addBuilder(com.google.bigtable.v2.StreamContinuationToken.getDefaultInstance()); } /** * @@ -7136,9 +7403,10 @@ public StreamContinuationToken.Builder addContinuationTokensBuilder() { * * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ - public StreamContinuationToken.Builder addContinuationTokensBuilder(int index) { + public com.google.bigtable.v2.StreamContinuationToken.Builder addContinuationTokensBuilder( + int index) { 
return getContinuationTokensFieldBuilder() - .addBuilder(index, StreamContinuationToken.getDefaultInstance()); + .addBuilder(index, com.google.bigtable.v2.StreamContinuationToken.getDefaultInstance()); } /** * @@ -7150,21 +7418,22 @@ public StreamContinuationToken.Builder addContinuationTokensBuilder(int index) { * * repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2; */ - public java.util.List getContinuationTokensBuilderList() { + public java.util.List + getContinuationTokensBuilderList() { return getContinuationTokensFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilderV3< - StreamContinuationToken, - StreamContinuationToken.Builder, - StreamContinuationTokenOrBuilder> + com.google.bigtable.v2.StreamContinuationToken, + com.google.bigtable.v2.StreamContinuationToken.Builder, + com.google.bigtable.v2.StreamContinuationTokenOrBuilder> getContinuationTokensFieldBuilder() { if (continuationTokensBuilder_ == null) { continuationTokensBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< - StreamContinuationToken, - StreamContinuationToken.Builder, - StreamContinuationTokenOrBuilder>( + com.google.bigtable.v2.StreamContinuationToken, + com.google.bigtable.v2.StreamContinuationToken.Builder, + com.google.bigtable.v2.StreamContinuationTokenOrBuilder>( continuationTokens_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), @@ -7174,13 +7443,13 @@ public java.util.List getContinuationTokensBuil return continuationTokensBuilder_; } - @Override + @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } - @Override + @java.lang.Override public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); @@ -7190,19 +7459,20 @@ public final Builder mergeUnknownFields( } // 
@@protoc_insertion_point(class_scope:google.bigtable.v2.ReadChangeStreamResponse.CloseStream) - private static final CloseStream DEFAULT_INSTANCE; + private static final com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream + DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new CloseStream(); + DEFAULT_INSTANCE = new com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream(); } - public static CloseStream getDefaultInstance() { + public static com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream getDefaultInstance() { return DEFAULT_INSTANCE; } private static final com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { - @Override + @java.lang.Override public CloseStream parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -7215,21 +7485,24 @@ public static com.google.protobuf.Parser parser() { return PARSER; } - @Override + @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } - @Override - public CloseStream getDefaultInstanceForType() { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } private int streamRecordCase_ = 0; - private Object streamRecord_; + private java.lang.Object streamRecord_; - public enum StreamRecordCase implements com.google.protobuf.Internal.EnumLite, InternalOneOfEnum { + public enum StreamRecordCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { DATA_CHANGE(1), HEARTBEAT(2), CLOSE_STREAM(3), @@ -7244,7 +7517,7 @@ private StreamRecordCase(int value) { * @return The enum associated with the given number. * @deprecated Use {@link #forNumber(int)} instead. 
*/ - @Deprecated + @java.lang.Deprecated public static StreamRecordCase valueOf(int value) { return forNumber(value); } @@ -7285,7 +7558,7 @@ public StreamRecordCase getStreamRecordCase() { * * @return Whether the dataChange field is set. */ - @Override + @java.lang.Override public boolean hasDataChange() { return streamRecordCase_ == 1; } @@ -7300,12 +7573,12 @@ public boolean hasDataChange() { * * @return The dataChange. */ - @Override - public DataChange getDataChange() { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.DataChange getDataChange() { if (streamRecordCase_ == 1) { - return (DataChange) streamRecord_; + return (com.google.bigtable.v2.ReadChangeStreamResponse.DataChange) streamRecord_; } - return DataChange.getDefaultInstance(); + return com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.getDefaultInstance(); } /** * @@ -7316,12 +7589,13 @@ public DataChange getDataChange() { * * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; */ - @Override - public DataChangeOrBuilder getDataChangeOrBuilder() { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.DataChangeOrBuilder + getDataChangeOrBuilder() { if (streamRecordCase_ == 1) { - return (DataChange) streamRecord_; + return (com.google.bigtable.v2.ReadChangeStreamResponse.DataChange) streamRecord_; } - return DataChange.getDefaultInstance(); + return com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.getDefaultInstance(); } public static final int HEARTBEAT_FIELD_NUMBER = 2; @@ -7336,7 +7610,7 @@ public DataChangeOrBuilder getDataChangeOrBuilder() { * * @return Whether the heartbeat field is set. */ - @Override + @java.lang.Override public boolean hasHeartbeat() { return streamRecordCase_ == 2; } @@ -7351,12 +7625,12 @@ public boolean hasHeartbeat() { * * @return The heartbeat. 
*/ - @Override - public Heartbeat getHeartbeat() { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat getHeartbeat() { if (streamRecordCase_ == 2) { - return (Heartbeat) streamRecord_; + return (com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat) streamRecord_; } - return Heartbeat.getDefaultInstance(); + return com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.getDefaultInstance(); } /** * @@ -7367,12 +7641,13 @@ public Heartbeat getHeartbeat() { * * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; */ - @Override - public HeartbeatOrBuilder getHeartbeatOrBuilder() { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.HeartbeatOrBuilder + getHeartbeatOrBuilder() { if (streamRecordCase_ == 2) { - return (Heartbeat) streamRecord_; + return (com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat) streamRecord_; } - return Heartbeat.getDefaultInstance(); + return com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.getDefaultInstance(); } public static final int CLOSE_STREAM_FIELD_NUMBER = 3; @@ -7387,7 +7662,7 @@ public HeartbeatOrBuilder getHeartbeatOrBuilder() { * * @return Whether the closeStream field is set. */ - @Override + @java.lang.Override public boolean hasCloseStream() { return streamRecordCase_ == 3; } @@ -7402,12 +7677,12 @@ public boolean hasCloseStream() { * * @return The closeStream. 
*/ - @Override - public CloseStream getCloseStream() { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream getCloseStream() { if (streamRecordCase_ == 3) { - return (CloseStream) streamRecord_; + return (com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream) streamRecord_; } - return CloseStream.getDefaultInstance(); + return com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream.getDefaultInstance(); } /** * @@ -7418,17 +7693,18 @@ public CloseStream getCloseStream() { * * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; */ - @Override - public CloseStreamOrBuilder getCloseStreamOrBuilder() { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.CloseStreamOrBuilder + getCloseStreamOrBuilder() { if (streamRecordCase_ == 3) { - return (CloseStream) streamRecord_; + return (com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream) streamRecord_; } - return CloseStream.getDefaultInstance(); + return com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream.getDefaultInstance(); } private byte memoizedIsInitialized = -1; - @Override + @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; @@ -7438,21 +7714,24 @@ public final boolean isInitialized() { return true; } - @Override + @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (streamRecordCase_ == 1) { - output.writeMessage(1, (DataChange) streamRecord_); + output.writeMessage( + 1, (com.google.bigtable.v2.ReadChangeStreamResponse.DataChange) streamRecord_); } if (streamRecordCase_ == 2) { - output.writeMessage(2, (Heartbeat) streamRecord_); + output.writeMessage( + 2, (com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat) streamRecord_); } if (streamRecordCase_ == 3) { - output.writeMessage(3, (CloseStream) streamRecord_); + output.writeMessage( + 3, 
(com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream) streamRecord_); } unknownFields.writeTo(output); } - @Override + @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; @@ -7460,30 +7739,34 @@ public int getSerializedSize() { size = 0; if (streamRecordCase_ == 1) { size += - com.google.protobuf.CodedOutputStream.computeMessageSize(1, (DataChange) streamRecord_); + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, (com.google.bigtable.v2.ReadChangeStreamResponse.DataChange) streamRecord_); } if (streamRecordCase_ == 2) { size += - com.google.protobuf.CodedOutputStream.computeMessageSize(2, (Heartbeat) streamRecord_); + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, (com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat) streamRecord_); } if (streamRecordCase_ == 3) { size += - com.google.protobuf.CodedOutputStream.computeMessageSize(3, (CloseStream) streamRecord_); + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, (com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream) streamRecord_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } - @Override - public boolean equals(final Object obj) { + @java.lang.Override + public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } - if (!(obj instanceof ReadChangeStreamResponse)) { + if (!(obj instanceof com.google.bigtable.v2.ReadChangeStreamResponse)) { return super.equals(obj); } - ReadChangeStreamResponse other = (ReadChangeStreamResponse) obj; + com.google.bigtable.v2.ReadChangeStreamResponse other = + (com.google.bigtable.v2.ReadChangeStreamResponse) obj; if (!getStreamRecordCase().equals(other.getStreamRecordCase())) return false; switch (streamRecordCase_) { @@ -7503,7 +7786,7 @@ public boolean equals(final Object obj) { return true; } - @Override + @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; 
@@ -7531,70 +7814,71 @@ public int hashCode() { return hash; } - public static ReadChangeStreamResponse parseFrom(java.nio.ByteBuffer data) + public static com.google.bigtable.v2.ReadChangeStreamResponse parseFrom(java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static ReadChangeStreamResponse parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static ReadChangeStreamResponse parseFrom(com.google.protobuf.ByteString data) + public static com.google.bigtable.v2.ReadChangeStreamResponse parseFrom( + com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static ReadChangeStreamResponse parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static ReadChangeStreamResponse parseFrom(byte[] data) + public static com.google.bigtable.v2.ReadChangeStreamResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static ReadChangeStreamResponse parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static ReadChangeStreamResponse parseFrom(java.io.InputStream input) + public static com.google.bigtable.v2.ReadChangeStreamResponse 
parseFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } - public static ReadChangeStreamResponse parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } - public static ReadChangeStreamResponse parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { + public static com.google.bigtable.v2.ReadChangeStreamResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); } - public static ReadChangeStreamResponse parseDelimitedFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( PARSER, input, extensionRegistry); } - public static ReadChangeStreamResponse parseFrom(com.google.protobuf.CodedInputStream input) - throws java.io.IOException { + public static com.google.bigtable.v2.ReadChangeStreamResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } - public static ReadChangeStreamResponse parseFrom( + public static com.google.bigtable.v2.ReadChangeStreamResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -7602,7 +7886,7 @@ public static ReadChangeStreamResponse parseFrom( PARSER, input, extensionRegistry); } - @Override + @java.lang.Override 
public Builder newBuilderForType() { return newBuilder(); } @@ -7611,17 +7895,17 @@ public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - public static Builder newBuilder(ReadChangeStreamResponse prototype) { + public static Builder newBuilder(com.google.bigtable.v2.ReadChangeStreamResponse prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } - @Override + @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } - @Override - protected Builder newBuilderForType(BuilderParent parent) { + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -7638,16 +7922,20 @@ protected Builder newBuilderForType(BuilderParent parent) { public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:google.bigtable.v2.ReadChangeStreamResponse) - ReadChangeStreamResponseOrBuilder { + com.google.bigtable.v2.ReadChangeStreamResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return BigtableProto.internal_static_google_bigtable_v2_ReadChangeStreamResponse_descriptor; + return com.google.bigtable.v2.BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_descriptor; } - @Override - protected FieldAccessorTable internalGetFieldAccessorTable() { - return BigtableProto + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.v2.BigtableProto .internal_static_google_bigtable_v2_ReadChangeStreamResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized(ReadChangeStreamResponse.class, Builder.class); + .ensureFieldAccessorsInitialized( + com.google.bigtable.v2.ReadChangeStreamResponse.class, + 
com.google.bigtable.v2.ReadChangeStreamResponse.Builder.class); } // Construct using com.google.bigtable.v2.ReadChangeStreamResponse.newBuilder() @@ -7655,7 +7943,7 @@ private Builder() { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -7664,7 +7952,7 @@ private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} } - @Override + @java.lang.Override public Builder clear() { super.clear(); streamRecordCase_ = 0; @@ -7672,28 +7960,30 @@ public Builder clear() { return this; } - @Override + @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return BigtableProto.internal_static_google_bigtable_v2_ReadChangeStreamResponse_descriptor; + return com.google.bigtable.v2.BigtableProto + .internal_static_google_bigtable_v2_ReadChangeStreamResponse_descriptor; } - @Override - public ReadChangeStreamResponse getDefaultInstanceForType() { - return ReadChangeStreamResponse.getDefaultInstance(); + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse getDefaultInstanceForType() { + return com.google.bigtable.v2.ReadChangeStreamResponse.getDefaultInstance(); } - @Override - public ReadChangeStreamResponse build() { - ReadChangeStreamResponse result = buildPartial(); + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse build() { + com.google.bigtable.v2.ReadChangeStreamResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - @Override - public ReadChangeStreamResponse buildPartial() { - ReadChangeStreamResponse result = new ReadChangeStreamResponse(this); + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse buildPartial() { + 
com.google.bigtable.v2.ReadChangeStreamResponse result = + new com.google.bigtable.v2.ReadChangeStreamResponse(this); if (streamRecordCase_ == 1) { if (dataChangeBuilder_ == null) { result.streamRecord_ = streamRecord_; @@ -7720,50 +8010,52 @@ public ReadChangeStreamResponse buildPartial() { return result; } - @Override + @java.lang.Override public Builder clone() { return super.clone(); } - @Override - public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } - @Override + @java.lang.Override public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } - @Override + @java.lang.Override public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } - @Override + @java.lang.Override public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } - @Override + @java.lang.Override public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } - @Override + @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof ReadChangeStreamResponse) { - return mergeFrom((ReadChangeStreamResponse) other); + if (other instanceof com.google.bigtable.v2.ReadChangeStreamResponse) { + return mergeFrom((com.google.bigtable.v2.ReadChangeStreamResponse) other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(ReadChangeStreamResponse 
other) { - if (other == ReadChangeStreamResponse.getDefaultInstance()) return this; + public Builder mergeFrom(com.google.bigtable.v2.ReadChangeStreamResponse other) { + if (other == com.google.bigtable.v2.ReadChangeStreamResponse.getDefaultInstance()) + return this; switch (other.getStreamRecordCase()) { case DATA_CHANGE: { @@ -7790,21 +8082,21 @@ public Builder mergeFrom(ReadChangeStreamResponse other) { return this; } - @Override + @java.lang.Override public final boolean isInitialized() { return true; } - @Override + @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - ReadChangeStreamResponse parsedMessage = null; + com.google.bigtable.v2.ReadChangeStreamResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (ReadChangeStreamResponse) e.getUnfinishedMessage(); + parsedMessage = (com.google.bigtable.v2.ReadChangeStreamResponse) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { @@ -7815,7 +8107,7 @@ public Builder mergeFrom( } private int streamRecordCase_ = 0; - private Object streamRecord_; + private java.lang.Object streamRecord_; public StreamRecordCase getStreamRecordCase() { return StreamRecordCase.forNumber(streamRecordCase_); @@ -7829,7 +8121,9 @@ public Builder clearStreamRecord() { } private com.google.protobuf.SingleFieldBuilderV3< - DataChange, DataChange.Builder, DataChangeOrBuilder> + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange, + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Builder, + com.google.bigtable.v2.ReadChangeStreamResponse.DataChangeOrBuilder> dataChangeBuilder_; /** * @@ -7842,7 +8136,7 @@ public Builder clearStreamRecord() { * * @return Whether the dataChange field is set. 
*/ - @Override + @java.lang.Override public boolean hasDataChange() { return streamRecordCase_ == 1; } @@ -7857,18 +8151,18 @@ public boolean hasDataChange() { * * @return The dataChange. */ - @Override - public DataChange getDataChange() { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.DataChange getDataChange() { if (dataChangeBuilder_ == null) { if (streamRecordCase_ == 1) { - return (DataChange) streamRecord_; + return (com.google.bigtable.v2.ReadChangeStreamResponse.DataChange) streamRecord_; } - return DataChange.getDefaultInstance(); + return com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.getDefaultInstance(); } else { if (streamRecordCase_ == 1) { return dataChangeBuilder_.getMessage(); } - return DataChange.getDefaultInstance(); + return com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.getDefaultInstance(); } } /** @@ -7880,7 +8174,7 @@ public DataChange getDataChange() { * * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; */ - public Builder setDataChange(DataChange value) { + public Builder setDataChange(com.google.bigtable.v2.ReadChangeStreamResponse.DataChange value) { if (dataChangeBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -7902,7 +8196,8 @@ public Builder setDataChange(DataChange value) { * * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; */ - public Builder setDataChange(DataChange.Builder builderForValue) { + public Builder setDataChange( + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Builder builderForValue) { if (dataChangeBuilder_ == null) { streamRecord_ = builderForValue.build(); onChanged(); @@ -7921,11 +8216,18 @@ public Builder setDataChange(DataChange.Builder builderForValue) { * * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; */ - public Builder mergeDataChange(DataChange value) { + public Builder mergeDataChange( + 
com.google.bigtable.v2.ReadChangeStreamResponse.DataChange value) { if (dataChangeBuilder_ == null) { - if (streamRecordCase_ == 1 && streamRecord_ != DataChange.getDefaultInstance()) { + if (streamRecordCase_ == 1 + && streamRecord_ + != com.google.bigtable.v2.ReadChangeStreamResponse.DataChange + .getDefaultInstance()) { streamRecord_ = - DataChange.newBuilder((DataChange) streamRecord_).mergeFrom(value).buildPartial(); + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.newBuilder( + (com.google.bigtable.v2.ReadChangeStreamResponse.DataChange) streamRecord_) + .mergeFrom(value) + .buildPartial(); } else { streamRecord_ = value; } @@ -7974,7 +8276,8 @@ public Builder clearDataChange() { * * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; */ - public DataChange.Builder getDataChangeBuilder() { + public com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Builder + getDataChangeBuilder() { return getDataChangeFieldBuilder().getBuilder(); } /** @@ -7986,15 +8289,16 @@ public DataChange.Builder getDataChangeBuilder() { * * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; */ - @Override - public DataChangeOrBuilder getDataChangeOrBuilder() { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.DataChangeOrBuilder + getDataChangeOrBuilder() { if ((streamRecordCase_ == 1) && (dataChangeBuilder_ != null)) { return dataChangeBuilder_.getMessageOrBuilder(); } else { if (streamRecordCase_ == 1) { - return (DataChange) streamRecord_; + return (com.google.bigtable.v2.ReadChangeStreamResponse.DataChange) streamRecord_; } - return DataChange.getDefaultInstance(); + return com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.getDefaultInstance(); } } /** @@ -8007,16 +8311,23 @@ public DataChangeOrBuilder getDataChangeOrBuilder() { * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; */ private com.google.protobuf.SingleFieldBuilderV3< - DataChange, 
DataChange.Builder, DataChangeOrBuilder> + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange, + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Builder, + com.google.bigtable.v2.ReadChangeStreamResponse.DataChangeOrBuilder> getDataChangeFieldBuilder() { if (dataChangeBuilder_ == null) { if (!(streamRecordCase_ == 1)) { - streamRecord_ = DataChange.getDefaultInstance(); + streamRecord_ = + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.getDefaultInstance(); } dataChangeBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< - DataChange, DataChange.Builder, DataChangeOrBuilder>( - (DataChange) streamRecord_, getParentForChildren(), isClean()); + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange, + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Builder, + com.google.bigtable.v2.ReadChangeStreamResponse.DataChangeOrBuilder>( + (com.google.bigtable.v2.ReadChangeStreamResponse.DataChange) streamRecord_, + getParentForChildren(), + isClean()); streamRecord_ = null; } streamRecordCase_ = 1; @@ -8026,7 +8337,9 @@ public DataChangeOrBuilder getDataChangeOrBuilder() { } private com.google.protobuf.SingleFieldBuilderV3< - Heartbeat, Heartbeat.Builder, HeartbeatOrBuilder> + com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat, + com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.Builder, + com.google.bigtable.v2.ReadChangeStreamResponse.HeartbeatOrBuilder> heartbeatBuilder_; /** * @@ -8039,7 +8352,7 @@ public DataChangeOrBuilder getDataChangeOrBuilder() { * * @return Whether the heartbeat field is set. */ - @Override + @java.lang.Override public boolean hasHeartbeat() { return streamRecordCase_ == 2; } @@ -8054,18 +8367,18 @@ public boolean hasHeartbeat() { * * @return The heartbeat. 
*/ - @Override - public Heartbeat getHeartbeat() { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat getHeartbeat() { if (heartbeatBuilder_ == null) { if (streamRecordCase_ == 2) { - return (Heartbeat) streamRecord_; + return (com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat) streamRecord_; } - return Heartbeat.getDefaultInstance(); + return com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.getDefaultInstance(); } else { if (streamRecordCase_ == 2) { return heartbeatBuilder_.getMessage(); } - return Heartbeat.getDefaultInstance(); + return com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.getDefaultInstance(); } } /** @@ -8077,7 +8390,7 @@ public Heartbeat getHeartbeat() { * * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; */ - public Builder setHeartbeat(Heartbeat value) { + public Builder setHeartbeat(com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat value) { if (heartbeatBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -8099,7 +8412,8 @@ public Builder setHeartbeat(Heartbeat value) { * * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; */ - public Builder setHeartbeat(Heartbeat.Builder builderForValue) { + public Builder setHeartbeat( + com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.Builder builderForValue) { if (heartbeatBuilder_ == null) { streamRecord_ = builderForValue.build(); onChanged(); @@ -8118,11 +8432,16 @@ public Builder setHeartbeat(Heartbeat.Builder builderForValue) { * * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; */ - public Builder mergeHeartbeat(Heartbeat value) { + public Builder mergeHeartbeat(com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat value) { if (heartbeatBuilder_ == null) { - if (streamRecordCase_ == 2 && streamRecord_ != Heartbeat.getDefaultInstance()) { + if (streamRecordCase_ == 2 + && streamRecord_ + != 
com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.getDefaultInstance()) { streamRecord_ = - Heartbeat.newBuilder((Heartbeat) streamRecord_).mergeFrom(value).buildPartial(); + com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.newBuilder( + (com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat) streamRecord_) + .mergeFrom(value) + .buildPartial(); } else { streamRecord_ = value; } @@ -8171,7 +8490,7 @@ public Builder clearHeartbeat() { * * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; */ - public Heartbeat.Builder getHeartbeatBuilder() { + public com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.Builder getHeartbeatBuilder() { return getHeartbeatFieldBuilder().getBuilder(); } /** @@ -8183,15 +8502,16 @@ public Heartbeat.Builder getHeartbeatBuilder() { * * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; */ - @Override - public HeartbeatOrBuilder getHeartbeatOrBuilder() { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.HeartbeatOrBuilder + getHeartbeatOrBuilder() { if ((streamRecordCase_ == 2) && (heartbeatBuilder_ != null)) { return heartbeatBuilder_.getMessageOrBuilder(); } else { if (streamRecordCase_ == 2) { - return (Heartbeat) streamRecord_; + return (com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat) streamRecord_; } - return Heartbeat.getDefaultInstance(); + return com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.getDefaultInstance(); } } /** @@ -8204,16 +8524,23 @@ public HeartbeatOrBuilder getHeartbeatOrBuilder() { * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; */ private com.google.protobuf.SingleFieldBuilderV3< - Heartbeat, Heartbeat.Builder, HeartbeatOrBuilder> + com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat, + com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.Builder, + com.google.bigtable.v2.ReadChangeStreamResponse.HeartbeatOrBuilder> getHeartbeatFieldBuilder() { if (heartbeatBuilder_ == 
null) { if (!(streamRecordCase_ == 2)) { - streamRecord_ = Heartbeat.getDefaultInstance(); + streamRecord_ = + com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.getDefaultInstance(); } heartbeatBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< - Heartbeat, Heartbeat.Builder, HeartbeatOrBuilder>( - (Heartbeat) streamRecord_, getParentForChildren(), isClean()); + com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat, + com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.Builder, + com.google.bigtable.v2.ReadChangeStreamResponse.HeartbeatOrBuilder>( + (com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat) streamRecord_, + getParentForChildren(), + isClean()); streamRecord_ = null; } streamRecordCase_ = 2; @@ -8223,7 +8550,9 @@ public HeartbeatOrBuilder getHeartbeatOrBuilder() { } private com.google.protobuf.SingleFieldBuilderV3< - CloseStream, CloseStream.Builder, CloseStreamOrBuilder> + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream, + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream.Builder, + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStreamOrBuilder> closeStreamBuilder_; /** * @@ -8236,7 +8565,7 @@ public HeartbeatOrBuilder getHeartbeatOrBuilder() { * * @return Whether the closeStream field is set. */ - @Override + @java.lang.Override public boolean hasCloseStream() { return streamRecordCase_ == 3; } @@ -8251,18 +8580,18 @@ public boolean hasCloseStream() { * * @return The closeStream. 
*/ - @Override - public CloseStream getCloseStream() { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream getCloseStream() { if (closeStreamBuilder_ == null) { if (streamRecordCase_ == 3) { - return (CloseStream) streamRecord_; + return (com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream) streamRecord_; } - return CloseStream.getDefaultInstance(); + return com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream.getDefaultInstance(); } else { if (streamRecordCase_ == 3) { return closeStreamBuilder_.getMessage(); } - return CloseStream.getDefaultInstance(); + return com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream.getDefaultInstance(); } } /** @@ -8274,7 +8603,8 @@ public CloseStream getCloseStream() { * * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; */ - public Builder setCloseStream(CloseStream value) { + public Builder setCloseStream( + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream value) { if (closeStreamBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -8296,7 +8626,8 @@ public Builder setCloseStream(CloseStream value) { * * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; */ - public Builder setCloseStream(CloseStream.Builder builderForValue) { + public Builder setCloseStream( + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream.Builder builderForValue) { if (closeStreamBuilder_ == null) { streamRecord_ = builderForValue.build(); onChanged(); @@ -8315,11 +8646,18 @@ public Builder setCloseStream(CloseStream.Builder builderForValue) { * * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; */ - public Builder mergeCloseStream(CloseStream value) { + public Builder mergeCloseStream( + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream value) { if (closeStreamBuilder_ == null) { - if (streamRecordCase_ == 3 && streamRecord_ != CloseStream.getDefaultInstance()) { + 
if (streamRecordCase_ == 3 + && streamRecord_ + != com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream + .getDefaultInstance()) { streamRecord_ = - CloseStream.newBuilder((CloseStream) streamRecord_).mergeFrom(value).buildPartial(); + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream.newBuilder( + (com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream) streamRecord_) + .mergeFrom(value) + .buildPartial(); } else { streamRecord_ = value; } @@ -8368,7 +8706,8 @@ public Builder clearCloseStream() { * * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; */ - public CloseStream.Builder getCloseStreamBuilder() { + public com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream.Builder + getCloseStreamBuilder() { return getCloseStreamFieldBuilder().getBuilder(); } /** @@ -8380,15 +8719,16 @@ public CloseStream.Builder getCloseStreamBuilder() { * * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; */ - @Override - public CloseStreamOrBuilder getCloseStreamOrBuilder() { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse.CloseStreamOrBuilder + getCloseStreamOrBuilder() { if ((streamRecordCase_ == 3) && (closeStreamBuilder_ != null)) { return closeStreamBuilder_.getMessageOrBuilder(); } else { if (streamRecordCase_ == 3) { - return (CloseStream) streamRecord_; + return (com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream) streamRecord_; } - return CloseStream.getDefaultInstance(); + return com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream.getDefaultInstance(); } } /** @@ -8401,16 +8741,23 @@ public CloseStreamOrBuilder getCloseStreamOrBuilder() { * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; */ private com.google.protobuf.SingleFieldBuilderV3< - CloseStream, CloseStream.Builder, CloseStreamOrBuilder> + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream, + 
com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream.Builder, + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStreamOrBuilder> getCloseStreamFieldBuilder() { if (closeStreamBuilder_ == null) { if (!(streamRecordCase_ == 3)) { - streamRecord_ = CloseStream.getDefaultInstance(); + streamRecord_ = + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream.getDefaultInstance(); } closeStreamBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< - CloseStream, CloseStream.Builder, CloseStreamOrBuilder>( - (CloseStream) streamRecord_, getParentForChildren(), isClean()); + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream, + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream.Builder, + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStreamOrBuilder>( + (com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream) streamRecord_, + getParentForChildren(), + isClean()); streamRecord_ = null; } streamRecordCase_ = 3; @@ -8419,12 +8766,12 @@ public CloseStreamOrBuilder getCloseStreamOrBuilder() { return closeStreamBuilder_; } - @Override + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } - @Override + @java.lang.Override public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); @@ -8434,19 +8781,19 @@ public final Builder mergeUnknownFields( } // @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadChangeStreamResponse) - private static final ReadChangeStreamResponse DEFAULT_INSTANCE; + private static final com.google.bigtable.v2.ReadChangeStreamResponse DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new ReadChangeStreamResponse(); + DEFAULT_INSTANCE = new com.google.bigtable.v2.ReadChangeStreamResponse(); } - public static ReadChangeStreamResponse getDefaultInstance() { + public static 
com.google.bigtable.v2.ReadChangeStreamResponse getDefaultInstance() { return DEFAULT_INSTANCE; } private static final com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { - @Override + @java.lang.Override public ReadChangeStreamResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -8459,13 +8806,13 @@ public static com.google.protobuf.Parser parser() { return PARSER; } - @Override + @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } - @Override - public ReadChangeStreamResponse getDefaultInstanceForType() { + @java.lang.Override + public com.google.bigtable.v2.ReadChangeStreamResponse getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamResponseOrBuilder.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamResponseOrBuilder.java index 96f0b11d26..ad1efd09a4 100644 --- a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamResponseOrBuilder.java +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamResponseOrBuilder.java @@ -46,7 +46,7 @@ public interface ReadChangeStreamResponseOrBuilder * * @return The dataChange. */ - ReadChangeStreamResponse.DataChange getDataChange(); + com.google.bigtable.v2.ReadChangeStreamResponse.DataChange getDataChange(); /** * * @@ -56,7 +56,7 @@ public interface ReadChangeStreamResponseOrBuilder * * .google.bigtable.v2.ReadChangeStreamResponse.DataChange data_change = 1; */ - ReadChangeStreamResponse.DataChangeOrBuilder getDataChangeOrBuilder(); + com.google.bigtable.v2.ReadChangeStreamResponse.DataChangeOrBuilder getDataChangeOrBuilder(); /** * @@ -81,7 +81,7 @@ public interface ReadChangeStreamResponseOrBuilder * * @return The heartbeat. 
*/ - ReadChangeStreamResponse.Heartbeat getHeartbeat(); + com.google.bigtable.v2.ReadChangeStreamResponse.Heartbeat getHeartbeat(); /** * * @@ -91,7 +91,7 @@ public interface ReadChangeStreamResponseOrBuilder * * .google.bigtable.v2.ReadChangeStreamResponse.Heartbeat heartbeat = 2; */ - ReadChangeStreamResponse.HeartbeatOrBuilder getHeartbeatOrBuilder(); + com.google.bigtable.v2.ReadChangeStreamResponse.HeartbeatOrBuilder getHeartbeatOrBuilder(); /** * @@ -116,7 +116,7 @@ public interface ReadChangeStreamResponseOrBuilder * * @return The closeStream. */ - ReadChangeStreamResponse.CloseStream getCloseStream(); + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStream getCloseStream(); /** * * @@ -126,7 +126,7 @@ public interface ReadChangeStreamResponseOrBuilder * * .google.bigtable.v2.ReadChangeStreamResponse.CloseStream close_stream = 3; */ - ReadChangeStreamResponse.CloseStreamOrBuilder getCloseStreamOrBuilder(); + com.google.bigtable.v2.ReadChangeStreamResponse.CloseStreamOrBuilder getCloseStreamOrBuilder(); - public ReadChangeStreamResponse.StreamRecordCase getStreamRecordCase(); + public com.google.bigtable.v2.ReadChangeStreamResponse.StreamRecordCase getStreamRecordCase(); } diff --git a/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/bigtable.proto b/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/bigtable.proto index a99bb410a1..8452338077 100644 --- a/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/bigtable.proto +++ b/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/bigtable.proto @@ -47,7 +47,7 @@ option (google.api.resource_definition) = { service Bigtable { option (google.api.default_host) = "bigtable.googleapis.com"; option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/bigtable.data," + "https://www.googleapis.com/auth/bigtable.data," "https://www.googleapis.com/auth/bigtable.data.readonly," "https://www.googleapis.com/auth/cloud-bigtable.data," 
"https://www.googleapis.com/auth/cloud-bigtable.data.readonly," @@ -205,10 +205,9 @@ service Bigtable { // Returns the current list of partitions that make up the table's // change stream. The union of partitions will cover the entire keyspace. // Partitions can be read with `ReadChangeStream`. - rpc ListChangeStreamPartitions(ListChangeStreamPartitionsRequest) - returns (stream ListChangeStreamPartitionsResponse) { + rpc GenerateInitialChangeStreamPartitions(GenerateInitialChangeStreamPartitionsRequest) returns (stream GenerateInitialChangeStreamPartitionsResponse) { option (google.api.http) = { - post: "/v2/{table_name=projects/*/instances/*/tables/*}:listChangeStreamPartitions" + post: "/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions" body: "*" }; option (google.api.method_signature) = "table_name"; @@ -219,8 +218,7 @@ service Bigtable { // Reads changes from a table's change stream. Changes will // reflect both user-initiated mutations and mutations that are caused by // garbage collection. - rpc ReadChangeStream(ReadChangeStreamRequest) - returns (stream ReadChangeStreamResponse) { + rpc ReadChangeStream(ReadChangeStreamRequest) returns (stream ReadChangeStreamResponse) { option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream" body: "*" @@ -521,9 +519,7 @@ message PingAndWarmRequest { } // Response message for Bigtable.PingAndWarm connection keepalive and warming. -message PingAndWarmResponse { - -} +message PingAndWarmResponse {} // Request message for Bigtable.ReadModifyWriteRow. message ReadModifyWriteRowRequest { @@ -558,10 +554,10 @@ message ReadModifyWriteRowResponse { } // NOTE: This API is not generally available. Users must be allowlisted. -// Request message for Bigtable.ListChangeStreamPartitions. -message ListChangeStreamPartitionsRequest { - // Required. The unique name of the table from which to get change stream - // partitions. 
Values are of the form +// Request message for Bigtable.GenerateInitialChangeStreamPartitions. +message GenerateInitialChangeStreamPartitionsRequest { + // Required. The unique name of the table from which to get change stream partitions. + // Values are of the form // `projects//instances//tables/
  • `. // Change streaming must be enabled on the table. string table_name = 1 [ @@ -578,8 +574,8 @@ message ListChangeStreamPartitionsRequest { } // NOTE: This API is not generally available. Users must be allowlisted. -// Response message for Bigtable.ListChangeStreamPartitions. -message ListChangeStreamPartitionsResponse { +// Response message for Bigtable.GenerateInitialChangeStreamPartitions. +message GenerateInitialChangeStreamPartitionsResponse { // A partition of the change stream. StreamPartition partition = 1; } From c3086d99fc2009dcc296a6267f83d017f6e789cb Mon Sep 17 00:00:00 2001 From: tengzhonger <109308630+tengzhonger@users.noreply.github.com> Date: Mon, 15 Aug 2022 12:34:54 -0400 Subject: [PATCH 11/13] =?UTF-8?q?feat:=20Change=20CDC=20related=20APIs=20t?= =?UTF-8?q?o=20return=20ByteStringRange=20instead=20of=20Ro=E2=80=A6=20(#1?= =?UTF-8?q?355)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Change CDC related APIs to return ByteStringRange instead of RowRange 1. GenerateInitialChangeStreamPartitions 2. 
ChangeStreamContinuationToken::GetRowRange * fix: Fix tests * fix: Address comments Co-authored-by: Teng Zhong --- .../bigtable/data/v2/BigtableDataClient.java | 23 ++++++++------- .../models/ChangeStreamContinuationToken.java | 16 ++++++---- .../cloud/bigtable/data/v2/models/Range.java | 19 ++++++++++++ .../data/v2/stub/EnhancedBigtableStub.java | 26 +++++++++-------- .../v2/stub/EnhancedBigtableStubSettings.java | 8 ++--- ...ialChangeStreamPartitionsUserCallable.java | 23 +++++++-------- .../data/v2/BigtableDataClientTests.java | 6 ++-- .../ChangeStreamContinuationTokenTest.java | 22 +++++++------- .../v2/models/ChangeStreamRecordTest.java | 11 +++++-- .../bigtable/data/v2/models/RangeTest.java | 11 +++++++ ...ChangeStreamRecordMergingCallableTest.java | 29 ++++++++++--------- ...hangeStreamPartitionsUserCallableTest.java | 17 +++-------- ...ReadChangeStreamMergingAcceptanceTest.java | 14 +++++++-- 13 files changed, 134 insertions(+), 91 deletions(-) diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java index acfbff0747..77b909b7a1 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java @@ -29,7 +29,6 @@ import com.google.api.gax.rpc.ServerStream; import com.google.api.gax.rpc.ServerStreamingCallable; import com.google.api.gax.rpc.UnaryCallable; -import com.google.bigtable.v2.RowRange; import com.google.cloud.bigtable.data.v2.models.BulkMutation; import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecord; import com.google.cloud.bigtable.data.v2.models.ConditionalRowMutation; @@ -37,6 +36,7 @@ import com.google.cloud.bigtable.data.v2.models.Filters.Filter; import com.google.cloud.bigtable.data.v2.models.KeyOffset; import 
com.google.cloud.bigtable.data.v2.models.Query; +import com.google.cloud.bigtable.data.v2.models.Range.ByteStringRange; import com.google.cloud.bigtable.data.v2.models.ReadChangeStreamQuery; import com.google.cloud.bigtable.data.v2.models.ReadModifyWriteRow; import com.google.cloud.bigtable.data.v2.models.Row; @@ -1503,11 +1503,11 @@ public UnaryCallable readModifyWriteRowCallable() { * String tableId = "[TABLE]"; * * try { - * ServerStream stream = bigtableDataClient.generateInitialChangeStreamPartitions(tableId); + * ServerStream stream = bigtableDataClient.generateInitialChangeStreamPartitions(tableId); * int count = 0; * * // Iterator style - * for (RowRange partition : stream) { + * for (ByteStringRange partition : stream) { * if (++count > 10) { * stream.cancel(); * break; @@ -1525,7 +1525,7 @@ public UnaryCallable readModifyWriteRowCallable() { * @see ServerStreamingCallable For call styles. */ @InternalApi("Used in Changestream beam pipeline.") - public ServerStream generateInitialChangeStreamPartitions(String tableId) { + public ServerStream generateInitialChangeStreamPartitions(String tableId) { return generateInitialChangeStreamPartitionsCallable().call(tableId); } @@ -1545,7 +1545,7 @@ public ServerStream generateInitialChangeStreamPartitions(String table * public void onStart(StreamController controller) { * this.controller = controller; * } - * public void onResponse(RowRange partition) { + * public void onResponse(ByteStringRange partition) { * if (++count > 10) { * controller.cancel(); * return; @@ -1568,7 +1568,7 @@ public ServerStream generateInitialChangeStreamPartitions(String table */ @InternalApi("Used in Changestream beam pipeline.") public void generateInitialChangeStreamPartitionsAsync( - String tableId, ResponseObserver observer) { + String tableId, ResponseObserver observer) { generateInitialChangeStreamPartitionsCallable().call(tableId, observer); } @@ -1584,7 +1584,7 @@ public void generateInitialChangeStreamPartitionsAsync( * * // 
Iterator style * try { - * for(RowRange partition : bigtableDataClient.generateInitialChangeStreamPartitionsCallable().call(tableId)) { + * for(ByteStringRange partition : bigtableDataClient.generateInitialChangeStreamPartitionsCallable().call(tableId)) { * // Do something with partition * } * } catch (NotFoundException e) { @@ -1595,7 +1595,7 @@ public void generateInitialChangeStreamPartitionsAsync( * * // Sync style * try { - * List partitions = bigtableDataClient.generateInitialChangeStreamPartitionsCallable().all().call(tableId); + * List partitions = bigtableDataClient.generateInitialChangeStreamPartitionsCallable().all().call(tableId); * } catch (NotFoundException e) { * System.out.println("Tried to read a non-existent table"); * } catch (RuntimeException e) { @@ -1603,10 +1603,10 @@ public void generateInitialChangeStreamPartitionsAsync( * } * * // Point look up - * ApiFuture partitionFuture = + * ApiFuture partitionFuture = * bigtableDataClient.generateInitialChangeStreamPartitionsCallable().first().futureCall(tableId); * - * ApiFutures.addCallback(partitionFuture, new ApiFutureCallback() { + * ApiFutures.addCallback(partitionFuture, new ApiFutureCallback() { * public void onFailure(Throwable t) { * if (t instanceof NotFoundException) { * System.out.println("Tried to read a non-existent table"); @@ -1626,7 +1626,8 @@ public void generateInitialChangeStreamPartitionsAsync( * @see ServerStreamingCallable For call styles. 
*/ @InternalApi("Used in Changestream beam pipeline.") - public ServerStreamingCallable generateInitialChangeStreamPartitionsCallable() { + public ServerStreamingCallable + generateInitialChangeStreamPartitionsCallable() { return stub.generateInitialChangeStreamPartitionsCallable(); } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationToken.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationToken.java index af7b15ea4e..06e975c827 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationToken.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationToken.java @@ -54,9 +54,13 @@ public ChangeStreamContinuationToken( .build(); } - // TODO: Change this to return ByteStringRange. - public RowRange getRowRange() { - return this.tokenProto.getPartition().getRowRange(); + /** + * Get the partition of the current continuation token, represented by a {@link ByteStringRange}. 
+ */ + public ByteStringRange getPartition() { + return ByteStringRange.create( + this.tokenProto.getPartition().getRowRange().getStartKeyClosed(), + this.tokenProto.getPartition().getRowRange().getEndKeyOpen()); } public String getToken() { @@ -95,19 +99,19 @@ public boolean equals(Object o) { return false; } ChangeStreamContinuationToken otherToken = (ChangeStreamContinuationToken) o; - return Objects.equal(getRowRange(), otherToken.getRowRange()) + return Objects.equal(getPartition(), otherToken.getPartition()) && Objects.equal(getToken(), otherToken.getToken()); } @Override public int hashCode() { - return Objects.hashCode(getRowRange(), getToken()); + return Objects.hashCode(getPartition(), getToken()); } @Override public String toString() { return MoreObjects.toStringHelper(this) - .add("rowRange", getRowRange()) + .add("partition", getPartition()) .add("token", getToken()) .toString(); } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Range.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Range.java index 4d7a10ab2a..c56a4163b8 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Range.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/Range.java @@ -15,10 +15,13 @@ */ package com.google.cloud.bigtable.data.v2.models; +import com.google.api.core.InternalApi; import com.google.api.core.InternalExtensionOnly; +import com.google.bigtable.v2.RowRange; import com.google.common.base.Objects; import com.google.common.base.Preconditions; import com.google.protobuf.ByteString; +import com.google.protobuf.InvalidProtocolBufferException; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; @@ -395,6 +398,22 @@ private void writeObject(ObjectOutputStream output) throws IOException { output.defaultWriteObject(); } + @InternalApi("Used in Changestream beam pipeline.") + public 
static ByteString toByteString(ByteStringRange byteStringRange) { + return RowRange.newBuilder() + .setStartKeyClosed(byteStringRange.getStart()) + .setEndKeyOpen(byteStringRange.getEnd()) + .build() + .toByteString(); + } + + @InternalApi("Used in Changestream beam pipeline.") + public static ByteStringRange toByteStringRange(ByteString byteString) + throws InvalidProtocolBufferException { + RowRange rowRange = RowRange.newBuilder().mergeFrom(byteString).build(); + return ByteStringRange.create(rowRange.getStartKeyClosed(), rowRange.getEndKeyOpen()); + } + @Override public boolean equals(Object o) { if (this == o) { diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java index 4e29f2a3f5..10eef25e7e 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java @@ -74,6 +74,7 @@ import com.google.cloud.bigtable.data.v2.models.DefaultRowAdapter; import com.google.cloud.bigtable.data.v2.models.KeyOffset; import com.google.cloud.bigtable.data.v2.models.Query; +import com.google.cloud.bigtable.data.v2.models.Range.ByteStringRange; import com.google.cloud.bigtable.data.v2.models.ReadChangeStreamQuery; import com.google.cloud.bigtable.data.v2.models.ReadModifyWriteRow; import com.google.cloud.bigtable.data.v2.models.Row; @@ -155,7 +156,7 @@ public class EnhancedBigtableStub implements AutoCloseable { private final UnaryCallable checkAndMutateRowCallable; private final UnaryCallable readModifyWriteRowCallable; - private final ServerStreamingCallable + private final ServerStreamingCallable generateInitialChangeStreamPartitionsCallable; private final ServerStreamingCallable @@ -833,7 +834,7 @@ public Map extract(ReadModifyWriteRowRequest request) { * 
RowRange}. * */ - private ServerStreamingCallable + private ServerStreamingCallable createGenerateInitialChangeStreamPartitionsCallable() { ServerStreamingCallable< GenerateInitialChangeStreamPartitionsRequest, @@ -862,22 +863,22 @@ public Map extract( .build(), settings.generateInitialChangeStreamPartitionsSettings().getRetryableCodes()); - ServerStreamingCallable userCallable = + ServerStreamingCallable userCallable = new GenerateInitialChangeStreamPartitionsUserCallable(base, requestContext); - ServerStreamingCallable withStatsHeaders = + ServerStreamingCallable withStatsHeaders = new StatsHeadersServerStreamingCallable<>(userCallable); // Sometimes GenerateInitialChangeStreamPartitions connections are disconnected via an RST // frame. This error is transient and should be treated similar to UNAVAILABLE. However, this // exception has an INTERNAL error code which by default is not retryable. Convert the exception // so it can be retried in the client. - ServerStreamingCallable convertException = + ServerStreamingCallable convertException = new ConvertStreamExceptionCallable<>(withStatsHeaders); // Copy idle timeout settings for watchdog. 
- ServerStreamingCallSettings innerSettings = - ServerStreamingCallSettings.newBuilder() + ServerStreamingCallSettings innerSettings = + ServerStreamingCallSettings.newBuilder() .setRetryableCodes( settings.generateInitialChangeStreamPartitionsSettings().getRetryableCodes()) .setRetrySettings( @@ -886,17 +887,17 @@ public Map extract( settings.generateInitialChangeStreamPartitionsSettings().getIdleTimeout()) .build(); - ServerStreamingCallable watched = + ServerStreamingCallable watched = Callables.watched(convertException, innerSettings, clientContext); - ServerStreamingCallable withBigtableTracer = + ServerStreamingCallable withBigtableTracer = new BigtableTracerStreamingCallable<>(watched); - ServerStreamingCallable retrying = + ServerStreamingCallable retrying = Callables.retrying(withBigtableTracer, innerSettings, clientContext); SpanName span = getSpanName("GenerateInitialChangeStreamPartitions"); - ServerStreamingCallable traced = + ServerStreamingCallable traced = new TracedServerStreamingCallable<>(retrying, clientContext.getTracerFactory(), span); return traced.withDefaultCallContext(clientContext.getDefaultCallContext()); @@ -1039,7 +1040,8 @@ public UnaryCallable readModifyWriteRowCallable() { } /** Returns a streaming generate initial change stream partitions callable */ - public ServerStreamingCallable generateInitialChangeStreamPartitionsCallable() { + public ServerStreamingCallable + generateInitialChangeStreamPartitionsCallable() { return generateInitialChangeStreamPartitionsCallable; } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java index 9d2a731018..49eb79f5ca 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java +++ 
b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java @@ -33,12 +33,12 @@ import com.google.api.gax.rpc.TransportChannelProvider; import com.google.api.gax.rpc.UnaryCallSettings; import com.google.auth.Credentials; -import com.google.bigtable.v2.RowRange; import com.google.cloud.bigtable.Version; import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecord; import com.google.cloud.bigtable.data.v2.models.ConditionalRowMutation; import com.google.cloud.bigtable.data.v2.models.KeyOffset; import com.google.cloud.bigtable.data.v2.models.Query; +import com.google.cloud.bigtable.data.v2.models.Range.ByteStringRange; import com.google.cloud.bigtable.data.v2.models.ReadChangeStreamQuery; import com.google.cloud.bigtable.data.v2.models.ReadModifyWriteRow; import com.google.cloud.bigtable.data.v2.models.Row; @@ -212,7 +212,7 @@ public class EnhancedBigtableStubSettings extends StubSettings checkAndMutateRowSettings; private final UnaryCallSettings readModifyWriteRowSettings; - private final ServerStreamingCallSettings + private final ServerStreamingCallSettings generateInitialChangeStreamPartitionsSettings; private final ServerStreamingCallSettings readChangeStreamSettings; @@ -537,7 +537,7 @@ public UnaryCallSettings readModifyWriteRowSettings() { return readModifyWriteRowSettings; } - public ServerStreamingCallSettings + public ServerStreamingCallSettings generateInitialChangeStreamPartitionsSettings() { return generateInitialChangeStreamPartitionsSettings; } @@ -571,7 +571,7 @@ public static class Builder extends StubSettings.Builder checkAndMutateRowSettings; private final UnaryCallSettings.Builder readModifyWriteRowSettings; - private final ServerStreamingCallSettings.Builder + private final ServerStreamingCallSettings.Builder generateInitialChangeStreamPartitionsSettings; private final ServerStreamingCallSettings.Builder readChangeStreamSettings; diff --git 
a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/GenerateInitialChangeStreamPartitionsUserCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/GenerateInitialChangeStreamPartitionsUserCallable.java index 365cf56ff2..ce07018c52 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/GenerateInitialChangeStreamPartitionsUserCallable.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/changestream/GenerateInitialChangeStreamPartitionsUserCallable.java @@ -21,16 +21,16 @@ import com.google.api.gax.rpc.StreamController; import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest; import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse; -import com.google.bigtable.v2.RowRange; import com.google.cloud.bigtable.data.v2.internal.NameUtil; import com.google.cloud.bigtable.data.v2.internal.RequestContext; +import com.google.cloud.bigtable.data.v2.models.Range.ByteStringRange; /** * Simple wrapper for GenerateInitialChangeStreamPartitions to wrap the request and response * protobufs. 
*/ public class GenerateInitialChangeStreamPartitionsUserCallable - extends ServerStreamingCallable { + extends ServerStreamingCallable { private final RequestContext requestContext; private final ServerStreamingCallable< GenerateInitialChangeStreamPartitionsRequest, @@ -49,7 +49,7 @@ public GenerateInitialChangeStreamPartitionsUserCallable( @Override public void call( - String tableId, ResponseObserver responseObserver, ApiCallContext context) { + String tableId, ResponseObserver responseObserver, ApiCallContext context) { String tableName = NameUtil.formatTableName( requestContext.getProjectId(), requestContext.getInstanceId(), tableId); @@ -62,12 +62,12 @@ public void call( inner.call(request, new ConvertPartitionToRangeObserver(responseObserver), context); } - private class ConvertPartitionToRangeObserver + private static class ConvertPartitionToRangeObserver implements ResponseObserver { - private final ResponseObserver outerObserver; + private final ResponseObserver outerObserver; - ConvertPartitionToRangeObserver(ResponseObserver observer) { + ConvertPartitionToRangeObserver(ResponseObserver observer) { this.outerObserver = observer; } @@ -78,12 +78,11 @@ public void onStart(final StreamController controller) { @Override public void onResponse(GenerateInitialChangeStreamPartitionsResponse response) { - RowRange rowRange = - RowRange.newBuilder() - .setStartKeyClosed(response.getPartition().getRowRange().getStartKeyClosed()) - .setEndKeyOpen(response.getPartition().getRowRange().getEndKeyOpen()) - .build(); - outerObserver.onResponse(rowRange); + ByteStringRange byteStringRange = + ByteStringRange.create( + response.getPartition().getRowRange().getStartKeyClosed(), + response.getPartition().getRowRange().getEndKeyOpen()); + outerObserver.onResponse(byteStringRange); } @Override diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientTests.java 
b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientTests.java index c3850e7e15..f4f23085a2 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientTests.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientTests.java @@ -24,7 +24,6 @@ import com.google.api.gax.rpc.ResponseObserver; import com.google.api.gax.rpc.ServerStreamingCallable; import com.google.api.gax.rpc.UnaryCallable; -import com.google.bigtable.v2.RowRange; import com.google.cloud.bigtable.data.v2.models.BulkMutation; import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecord; import com.google.cloud.bigtable.data.v2.models.ConditionalRowMutation; @@ -32,6 +31,7 @@ import com.google.cloud.bigtable.data.v2.models.KeyOffset; import com.google.cloud.bigtable.data.v2.models.Mutation; import com.google.cloud.bigtable.data.v2.models.Query; +import com.google.cloud.bigtable.data.v2.models.Range.ByteStringRange; import com.google.cloud.bigtable.data.v2.models.ReadChangeStreamQuery; import com.google.cloud.bigtable.data.v2.models.ReadModifyWriteRow; import com.google.cloud.bigtable.data.v2.models.Row; @@ -83,7 +83,7 @@ public class BigtableDataClientTests { @Mock private Batcher mockBulkReadRowsBatcher; @Mock(answer = Answers.RETURNS_DEEP_STUBS) - private ServerStreamingCallable + private ServerStreamingCallable mockGenerateInitialChangeStreamPartitionsCallable; @Mock(answer = Answers.RETURNS_DEEP_STUBS) @@ -342,7 +342,7 @@ public void proxyGenerateInitialChangeStreamPartitionsAsyncTest() { .thenReturn(mockGenerateInitialChangeStreamPartitionsCallable); @SuppressWarnings("unchecked") - ResponseObserver mockObserver = Mockito.mock(ResponseObserver.class); + ResponseObserver mockObserver = Mockito.mock(ResponseObserver.class); bigtableDataClient.generateInitialChangeStreamPartitionsAsync("fake-table", mockObserver); 
Mockito.verify(mockGenerateInitialChangeStreamPartitionsCallable) diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationTokenTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationTokenTest.java index e93dfc70bf..7716ec06f7 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationTokenTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamContinuationTokenTest.java @@ -39,12 +39,10 @@ private ByteStringRange createFakeByteStringRange() { return ByteStringRange.create("a", "b"); } - // TODO: Get rid of this once we change ChangeStreamContinuationToken::getRowRange() - // to ChangeStreamContinuationToken::getByteStringRange(). - private RowRange rowRangeFromByteStringRange(ByteStringRange byteStringRange) { + private RowRange rowRangeFromPartition(ByteStringRange partition) { return RowRange.newBuilder() - .setStartKeyClosed(byteStringRange.getStart()) - .setEndKeyOpen(byteStringRange.getEnd()) + .setStartKeyClosed(partition.getStart()) + .setEndKeyOpen(partition.getEnd()) .build(); } @@ -53,8 +51,7 @@ public void basicTest() throws Exception { ByteStringRange byteStringRange = createFakeByteStringRange(); ChangeStreamContinuationToken changeStreamContinuationToken = new ChangeStreamContinuationToken(byteStringRange, TOKEN); - Assert.assertEquals( - changeStreamContinuationToken.getRowRange(), rowRangeFromByteStringRange(byteStringRange)); + Assert.assertEquals(changeStreamContinuationToken.getPartition(), byteStringRange); Assert.assertEquals(changeStreamContinuationToken.getToken(), TOKEN); ByteArrayOutputStream bos = new ByteArrayOutputStream(); @@ -69,15 +66,17 @@ public void basicTest() throws Exception { @Test public void fromProtoTest() { ByteStringRange byteStringRange = createFakeByteStringRange(); - RowRange fakeRowRange = 
rowRangeFromByteStringRange(byteStringRange); StreamContinuationToken proto = StreamContinuationToken.newBuilder() - .setPartition(StreamPartition.newBuilder().setRowRange(fakeRowRange).build()) + .setPartition( + StreamPartition.newBuilder() + .setRowRange(rowRangeFromPartition(byteStringRange)) + .build()) .setToken(TOKEN) .build(); ChangeStreamContinuationToken changeStreamContinuationToken = ChangeStreamContinuationToken.fromProto(proto); - Assert.assertEquals(changeStreamContinuationToken.getRowRange(), fakeRowRange); + Assert.assertEquals(changeStreamContinuationToken.getPartition(), byteStringRange); Assert.assertEquals(changeStreamContinuationToken.getToken(), TOKEN); Assert.assertEquals( changeStreamContinuationToken, @@ -89,8 +88,7 @@ public void toByteStringTest() throws Exception { ByteStringRange byteStringRange = createFakeByteStringRange(); ChangeStreamContinuationToken changeStreamContinuationToken = new ChangeStreamContinuationToken(byteStringRange, TOKEN); - Assert.assertEquals( - changeStreamContinuationToken.getRowRange(), rowRangeFromByteStringRange(byteStringRange)); + Assert.assertEquals(changeStreamContinuationToken.getPartition(), byteStringRange); Assert.assertEquals(changeStreamContinuationToken.getToken(), TOKEN); Assert.assertEquals( changeStreamContinuationToken, diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecordTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecordTest.java index 05df603959..c6aa7580dd 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecordTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecordTest.java @@ -21,6 +21,7 @@ import com.google.bigtable.v2.RowRange; import com.google.bigtable.v2.StreamContinuationToken; import com.google.bigtable.v2.StreamPartition; +import 
com.google.cloud.bigtable.data.v2.models.Range.ByteStringRange; import com.google.protobuf.ByteString; import com.google.protobuf.Timestamp; import com.google.rpc.Status; @@ -119,7 +120,9 @@ public void heartbeatTest() { Heartbeat actualHeartbeat = Heartbeat.fromProto(heartbeatProto); Assert.assertEquals(actualHeartbeat.getLowWatermark(), lowWatermark); - Assert.assertEquals(actualHeartbeat.getChangeStreamContinuationToken().getRowRange(), rowRange); + Assert.assertEquals( + actualHeartbeat.getChangeStreamContinuationToken().getPartition(), + ByteStringRange.create(rowRange.getStartKeyClosed(), rowRange.getEndKeyOpen())); Assert.assertEquals(actualHeartbeat.getChangeStreamContinuationToken().getToken(), token); } @@ -156,11 +159,13 @@ public void closeStreamTest() { Assert.assertEquals(status, actualCloseStream.getStatus()); Assert.assertEquals( - rowRange1, actualCloseStream.getChangeStreamContinuationTokens().get(0).getRowRange()); + actualCloseStream.getChangeStreamContinuationTokens().get(0).getPartition(), + ByteStringRange.create(rowRange1.getStartKeyClosed(), rowRange1.getEndKeyOpen())); Assert.assertEquals( token1, actualCloseStream.getChangeStreamContinuationTokens().get(0).getToken()); Assert.assertEquals( - rowRange2, actualCloseStream.getChangeStreamContinuationTokens().get(1).getRowRange()); + actualCloseStream.getChangeStreamContinuationTokens().get(1).getPartition(), + ByteStringRange.create(rowRange2.getStartKeyClosed(), rowRange2.getEndKeyOpen())); Assert.assertEquals( token2, actualCloseStream.getChangeStreamContinuationTokens().get(1).getToken()); } diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/RangeTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/RangeTest.java index eebdba5811..96768e1c82 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/RangeTest.java +++ 
b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/RangeTest.java @@ -21,6 +21,7 @@ import com.google.cloud.bigtable.data.v2.models.Range.ByteStringRange; import com.google.cloud.bigtable.data.v2.models.Range.TimestampRange; import com.google.protobuf.ByteString; +import com.google.protobuf.InvalidProtocolBufferException; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -306,4 +307,14 @@ public void byteStringSerializationTest() throws IOException, ClassNotFoundExcep ByteStringRange actual = (ByteStringRange) ois.readObject(); assertThat(actual).isEqualTo(expected); } + + @Test + public void byteStringRangeToByteStringTest() throws InvalidProtocolBufferException { + ByteStringRange expected = ByteStringRange.create("a", "z"); + + ByteString serialized = ByteStringRange.toByteString(expected); + ByteStringRange deserialized = ByteStringRange.toByteStringRange(serialized); + + assertThat(expected).isEqualTo(deserialized); + } } diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamRecordMergingCallableTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamRecordMergingCallableTest.java index d23eb64765..5cc04f764d 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamRecordMergingCallableTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamRecordMergingCallableTest.java @@ -25,6 +25,7 @@ import com.google.cloud.bigtable.data.v2.models.CloseStream; import com.google.cloud.bigtable.data.v2.models.DefaultChangeStreamRecordAdapter; import com.google.cloud.bigtable.data.v2.models.Heartbeat; +import com.google.cloud.bigtable.data.v2.models.Range.ByteStringRange; import com.google.cloud.bigtable.gaxx.testing.FakeStreamingApi; import 
com.google.cloud.bigtable.gaxx.testing.FakeStreamingApi.ServerStreamingStashCallable; import com.google.protobuf.ByteString; @@ -47,11 +48,15 @@ public class ChangeStreamRecordMergingCallableTest { @Test public void heartbeatTest() { + RowRange rowRange = RowRange.newBuilder().getDefaultInstanceForType(); ReadChangeStreamResponse.Heartbeat heartbeatProto = ReadChangeStreamResponse.Heartbeat.newBuilder() .setLowWatermark(Timestamp.newBuilder().setSeconds(1000).build()) .setContinuationToken( - StreamContinuationToken.newBuilder().setToken("random-token").build()) + StreamContinuationToken.newBuilder() + .setPartition(StreamPartition.newBuilder().setRowRange(rowRange)) + .setToken("random-token") + .build()) .build(); ReadChangeStreamResponse response = ReadChangeStreamResponse.newBuilder().setHeartbeat(heartbeatProto).build(); @@ -69,8 +74,8 @@ public void heartbeatTest() { Assert.assertTrue(record instanceof Heartbeat); Heartbeat heartbeat = (Heartbeat) record; Assert.assertEquals( - heartbeat.getChangeStreamContinuationToken().getRowRange(), - heartbeatProto.getContinuationToken().getPartition().getRowRange()); + heartbeat.getChangeStreamContinuationToken().getPartition(), + ByteStringRange.create(rowRange.getStartKeyClosed(), rowRange.getEndKeyOpen())); Assert.assertEquals( heartbeat.getChangeStreamContinuationToken().getToken(), heartbeatProto.getContinuationToken().getToken()); @@ -79,16 +84,14 @@ public void heartbeatTest() { @Test public void closeStreamTest() { + RowRange rowRange = + RowRange.newBuilder() + .setStartKeyClosed(ByteString.copyFromUtf8("")) + .setEndKeyOpen(ByteString.copyFromUtf8("")) + .build(); StreamContinuationToken streamContinuationToken = StreamContinuationToken.newBuilder() - .setPartition( - StreamPartition.newBuilder() - .setRowRange( - RowRange.newBuilder() - .setStartKeyClosed(ByteString.copyFromUtf8("")) - .setEndKeyOpen(ByteString.copyFromUtf8("")) - .build()) - .build()) + 
.setPartition(StreamPartition.newBuilder().setRowRange(rowRange).build()) .setToken("random-token") .build(); ReadChangeStreamResponse.CloseStream closeStreamProto = @@ -116,8 +119,8 @@ public void closeStreamTest() { ChangeStreamContinuationToken changeStreamContinuationToken = closeStream.getChangeStreamContinuationTokens().get(0); Assert.assertEquals( - changeStreamContinuationToken.getRowRange(), - streamContinuationToken.getPartition().getRowRange()); + changeStreamContinuationToken.getPartition(), + ByteStringRange.create(rowRange.getStartKeyClosed(), rowRange.getEndKeyOpen())); Assert.assertEquals( changeStreamContinuationToken.getToken(), streamContinuationToken.getToken()); } diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/GenerateInitialChangeStreamPartitionsUserCallableTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/GenerateInitialChangeStreamPartitionsUserCallableTest.java index 908961be77..885b1c6355 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/GenerateInitialChangeStreamPartitionsUserCallableTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/GenerateInitialChangeStreamPartitionsUserCallableTest.java @@ -23,10 +23,10 @@ import com.google.bigtable.v2.StreamPartition; import com.google.cloud.bigtable.data.v2.internal.NameUtil; import com.google.cloud.bigtable.data.v2.internal.RequestContext; +import com.google.cloud.bigtable.data.v2.models.Range.ByteStringRange; import com.google.cloud.bigtable.gaxx.testing.FakeStreamingApi; import com.google.common.collect.Lists; import com.google.common.truth.Truth; -import com.google.protobuf.ByteString; import java.util.List; import org.junit.Test; import org.junit.runner.RunWith; @@ -69,24 +69,15 @@ public void responseIsConverted() { GenerateInitialChangeStreamPartitionsResponse.newBuilder() 
.setPartition( StreamPartition.newBuilder() - .setRowRange( - RowRange.newBuilder() - .setStartKeyClosed(ByteString.copyFromUtf8("apple")) - .setEndKeyOpen(ByteString.copyFromUtf8("banana")) - .build()) + .setRowRange(RowRange.newBuilder().getDefaultInstanceForType()) .build()) .build())); GenerateInitialChangeStreamPartitionsUserCallable generateInitialChangeStreamPartitionsUserCallable = new GenerateInitialChangeStreamPartitionsUserCallable(inner, requestContext); - List results = + List results = generateInitialChangeStreamPartitionsUserCallable.all().call("my-table"); - Truth.assertThat(results) - .containsExactly( - RowRange.newBuilder() - .setStartKeyClosed(ByteString.copyFromUtf8("apple")) - .setEndKeyOpen(ByteString.copyFromUtf8("banana")) - .build()); + Truth.assertThat(results).containsExactly(ByteStringRange.create("", "")); } } diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamMergingAcceptanceTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamMergingAcceptanceTest.java index 5ae88a7f9f..ef8b9fec9f 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamMergingAcceptanceTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamMergingAcceptanceTest.java @@ -24,6 +24,7 @@ import com.google.bigtable.v2.Mutation; import com.google.bigtable.v2.ReadChangeStreamRequest; import com.google.bigtable.v2.ReadChangeStreamResponse; +import com.google.bigtable.v2.RowRange; import com.google.bigtable.v2.StreamContinuationToken; import com.google.bigtable.v2.StreamPartition; import com.google.bigtable.v2.TimestampRange; @@ -123,6 +124,7 @@ public void test() throws Exception { for (ChangeStreamRecord record : stream) { if (record instanceof Heartbeat) { Heartbeat heartbeat = (Heartbeat) record; + 
ChangeStreamContinuationToken token = heartbeat.getChangeStreamContinuationToken(); ReadChangeStreamResponse.Heartbeat heartbeatProto = ReadChangeStreamResponse.Heartbeat.newBuilder() .setContinuationToken( @@ -130,7 +132,10 @@ public void test() throws Exception { .setPartition( StreamPartition.newBuilder() .setRowRange( - heartbeat.getChangeStreamContinuationToken().getRowRange()) + RowRange.newBuilder() + .setStartKeyClosed(token.getPartition().getStart()) + .setEndKeyOpen(token.getPartition().getEnd()) + .build()) .build()) .setToken(heartbeat.getChangeStreamContinuationToken().getToken()) .build()) @@ -152,7 +157,12 @@ public void test() throws Exception { builder.addContinuationTokens( StreamContinuationToken.newBuilder() .setPartition( - StreamPartition.newBuilder().setRowRange(token.getRowRange()).build()) + StreamPartition.newBuilder() + .setRowRange( + RowRange.newBuilder() + .setStartKeyClosed(token.getPartition().getStart()) + .setEndKeyOpen(token.getPartition().getEnd()) + .build())) .setToken(token.getToken()) .build()); } From 351a1513d75136de3e6147dd4d65600bd2c9d06b Mon Sep 17 00:00:00 2001 From: tengzhonger <109308630+tengzhonger@users.noreply.github.com> Date: Tue, 16 Aug 2022 10:49:50 -0400 Subject: [PATCH 12/13] =?UTF-8?q?feat:=20Return=20MutationType=20and=20big?= =?UTF-8?q?table.common.Status=20instead=20of=20raw=20p=E2=80=A6=20(#1359)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Return MutationType and bigtable.common.Status instead of raw protos * fix: remove unused import * fix: fix test Co-authored-by: Teng Zhong --- .../data/v2/models/ChangeStreamMutation.java | 19 +++++++++++-------- .../bigtable/data/v2/models/CloseStream.java | 14 +++++++------- .../v2/models/ChangeStreamMutationTest.java | 7 ++----- .../v2/models/ChangeStreamRecordTest.java | 2 +- ...ChangeStreamRecordMergingCallableTest.java | 2 +- ...ReadChangeStreamMergingAcceptanceTest.java | 13 +++++++++++-- 6 files changed, 
33 insertions(+), 24 deletions(-) diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java index cfb8bb30b7..3d11d5d4fc 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutation.java @@ -16,7 +16,6 @@ package com.google.cloud.bigtable.data.v2.models; import com.google.api.core.InternalExtensionOnly; -import com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type; import com.google.cloud.bigtable.data.v2.models.Range.TimestampRange; import com.google.cloud.bigtable.data.v2.stub.changestream.ChangeStreamRecordMerger; import com.google.common.base.MoreObjects; @@ -69,10 +68,14 @@ public class ChangeStreamMutation implements ChangeStreamRecord, Serializable { private static final long serialVersionUID = 8419520253162024218L; + public enum MutationType { + USER, + GARBAGE_COLLECTION + } + private final ByteString rowKey; - /** Possible values: USER/GARBAGE_COLLECTION. */ - private final Type type; + private final MutationType type; /** This should only be set when type==USER. 
*/ private final String sourceClusterId; @@ -108,7 +111,7 @@ static Builder createUserMutation( @Nonnull String sourceClusterId, @Nonnull Timestamp commitTimestamp, int tieBreaker) { - return new Builder(rowKey, Type.USER, sourceClusterId, commitTimestamp, tieBreaker); + return new Builder(rowKey, MutationType.USER, sourceClusterId, commitTimestamp, tieBreaker); } /** @@ -118,7 +121,7 @@ static Builder createUserMutation( */ static Builder createGcMutation( @Nonnull ByteString rowKey, @Nonnull Timestamp commitTimestamp, int tieBreaker) { - return new Builder(rowKey, Type.GARBAGE_COLLECTION, null, commitTimestamp, tieBreaker); + return new Builder(rowKey, MutationType.GARBAGE_COLLECTION, null, commitTimestamp, tieBreaker); } private void readObject(ObjectInputStream input) throws IOException, ClassNotFoundException { @@ -142,7 +145,7 @@ public ByteString getRowKey() { /** Get the type of the current mutation. */ @Nonnull - public Type getType() { + public MutationType getType() { return this.type; } @@ -190,7 +193,7 @@ Builder toBuilder() { public static class Builder { private final ByteString rowKey; - private final Type type; + private final MutationType type; private final String sourceClusterId; @@ -206,7 +209,7 @@ public static class Builder { private Builder( ByteString rowKey, - Type type, + MutationType type, String sourceClusterId, Timestamp commitTimestamp, int tieBreaker) { diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/CloseStream.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/CloseStream.java index 346b0b60a7..795e05029a 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/CloseStream.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/models/CloseStream.java @@ -50,8 +50,8 @@ private CloseStream(Status status, List continuationTok } @InternalApi("Used in Changestream beam pipeline.") - public Status 
getStatus() { - return this.status; + public com.google.cloud.bigtable.common.Status getStatus() { + return com.google.cloud.bigtable.common.Status.fromProto(this.status); } @InternalApi("Used in Changestream beam pipeline.") @@ -88,21 +88,21 @@ public boolean equals(Object o) { return false; } CloseStream record = (CloseStream) o; - return Objects.equal(status, record.getStatus()) + return Objects.equal(getStatus(), record.getStatus()) && Objects.equal( - changeStreamContinuationTokens.build(), record.getChangeStreamContinuationTokens()); + getChangeStreamContinuationTokens(), record.getChangeStreamContinuationTokens()); } @Override public int hashCode() { - return Objects.hashCode(status, changeStreamContinuationTokens); + return Objects.hashCode(getStatus(), getChangeStreamContinuationTokens()); } @Override public String toString() { return MoreObjects.toStringHelper(this) - .add("status", status) - .add("changeStreamContinuationTokens", changeStreamContinuationTokens) + .add("status", getStatus()) + .add("changeStreamContinuationTokens", getChangeStreamContinuationTokens()) .toString(); } } diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutationTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutationTest.java index a14fe001cd..1052a1646d 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutationTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamMutationTest.java @@ -19,7 +19,6 @@ import com.google.bigtable.v2.MutateRowRequest; import com.google.bigtable.v2.MutateRowsRequest; -import com.google.bigtable.v2.ReadChangeStreamResponse; import com.google.cloud.bigtable.data.v2.internal.NameUtil; import com.google.cloud.bigtable.data.v2.internal.RequestContext; import com.google.common.primitives.Longs; @@ -72,8 +71,7 @@ public void 
userInitiatedMutationTest() throws IOException, ClassNotFoundExcepti // Test the getters. Assert.assertEquals(changeStreamMutation.getRowKey(), ByteString.copyFromUtf8("key")); - Assert.assertEquals( - changeStreamMutation.getType(), ReadChangeStreamResponse.DataChange.Type.USER); + Assert.assertEquals(changeStreamMutation.getType(), ChangeStreamMutation.MutationType.USER); Assert.assertEquals(changeStreamMutation.getSourceClusterId(), "fake-source-cluster-id"); Assert.assertEquals(changeStreamMutation.getCommitTimestamp(), fakeCommitTimestamp); Assert.assertEquals(changeStreamMutation.getTieBreaker(), 0); @@ -115,8 +113,7 @@ public void gcMutationTest() throws IOException, ClassNotFoundException { // Test the getters. Assert.assertEquals(changeStreamMutation.getRowKey(), ByteString.copyFromUtf8("key")); Assert.assertEquals( - changeStreamMutation.getType(), - ReadChangeStreamResponse.DataChange.Type.GARBAGE_COLLECTION); + changeStreamMutation.getType(), ChangeStreamMutation.MutationType.GARBAGE_COLLECTION); Assert.assertNull(changeStreamMutation.getSourceClusterId()); Assert.assertEquals(changeStreamMutation.getCommitTimestamp(), fakeCommitTimestamp); Assert.assertEquals(changeStreamMutation.getTieBreaker(), 0); diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecordTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecordTest.java index c6aa7580dd..ed54628f67 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecordTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/models/ChangeStreamRecordTest.java @@ -157,7 +157,7 @@ public void closeStreamTest() { .build(); CloseStream actualCloseStream = CloseStream.fromProto(closeStreamProto); - Assert.assertEquals(status, actualCloseStream.getStatus()); + Assert.assertEquals(status, actualCloseStream.getStatus().toProto()); 
Assert.assertEquals( actualCloseStream.getChangeStreamContinuationTokens().get(0).getPartition(), ByteStringRange.create(rowRange1.getStartKeyClosed(), rowRange1.getEndKeyOpen())); diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamRecordMergingCallableTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamRecordMergingCallableTest.java index 5cc04f764d..56cc5aa845 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamRecordMergingCallableTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ChangeStreamRecordMergingCallableTest.java @@ -114,7 +114,7 @@ public void closeStreamTest() { ChangeStreamRecord record = results.get(0); Assert.assertTrue(record instanceof CloseStream); CloseStream closeStream = (CloseStream) record; - Assert.assertEquals(closeStream.getStatus(), closeStreamProto.getStatus()); + Assert.assertEquals(closeStream.getStatus().toProto(), closeStreamProto.getStatus()); Assert.assertEquals(closeStream.getChangeStreamContinuationTokens().size(), 1); ChangeStreamContinuationToken changeStreamContinuationToken = closeStream.getChangeStreamContinuationTokens().get(0); diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamMergingAcceptanceTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamMergingAcceptanceTest.java index ef8b9fec9f..483562aa4a 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamMergingAcceptanceTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/changestream/ReadChangeStreamMergingAcceptanceTest.java @@ -24,6 +24,7 @@ import com.google.bigtable.v2.Mutation; import 
com.google.bigtable.v2.ReadChangeStreamRequest; import com.google.bigtable.v2.ReadChangeStreamResponse; +import com.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type; import com.google.bigtable.v2.RowRange; import com.google.bigtable.v2.StreamContinuationToken; import com.google.bigtable.v2.StreamPartition; @@ -151,7 +152,8 @@ public void test() throws Exception { } else if (record instanceof CloseStream) { CloseStream closeStream = (CloseStream) record; ReadChangeStreamResponse.CloseStream.Builder builder = - ReadChangeStreamResponse.CloseStream.newBuilder().setStatus(closeStream.getStatus()); + ReadChangeStreamResponse.CloseStream.newBuilder() + .setStatus(closeStream.getStatus().toProto()); for (ChangeStreamContinuationToken token : closeStream.getChangeStreamContinuationTokens()) { builder.addContinuationTokens( @@ -179,7 +181,14 @@ public void test() throws Exception { ReadChangeStreamTest.TestChangeStreamMutation.Builder builder = ReadChangeStreamTest.TestChangeStreamMutation.newBuilder(); builder.setRowKey(changeStreamMutation.getRowKey()); - builder.setType(changeStreamMutation.getType()); + Type type = Type.UNRECOGNIZED; + if (changeStreamMutation.getType() == ChangeStreamMutation.MutationType.USER) { + type = Type.USER; + } else if (changeStreamMutation.getType() + == ChangeStreamMutation.MutationType.GARBAGE_COLLECTION) { + type = Type.GARBAGE_COLLECTION; + } + builder.setType(type); if (changeStreamMutation.getSourceClusterId() != null) { builder.setSourceClusterId(changeStreamMutation.getSourceClusterId()); } From c631fa4f9e2118806a4aa55c20808ac90afa96b9 Mon Sep 17 00:00:00 2001 From: tengzhonger <109308630+tengzhonger@users.noreply.github.com> Date: Thu, 1 Sep 2022 16:16:22 -0400 Subject: [PATCH 13/13] feat: Expose CDC data API settings in EnhancedBigtableStubSettings (#1376) Co-authored-by: Teng Zhong --- .../v2/stub/EnhancedBigtableStubSettings.java | 14 ++++ .../EnhancedBigtableStubSettingsTest.java | 75 +++++++++++++++++++ 2 files 
changed, 89 insertions(+) diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java index 49eb79f5ca..f31c5308c5 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java @@ -884,6 +884,20 @@ public UnaryCallSettings.Builder readModifyWriteRowSett return readModifyWriteRowSettings; } + /** Returns the builder for the settings used for calls to ReadChangeStream. */ + public ServerStreamingCallSettings.Builder + readChangeStreamSettings() { + return readChangeStreamSettings; + } + + /** + * Returns the builder for the settings used for calls to GenerateInitialChangeStreamPartitions. + */ + public ServerStreamingCallSettings.Builder + generateInitialChangeStreamPartitionsSettings() { + return generateInitialChangeStreamPartitionsSettings; + } + @SuppressWarnings("unchecked") public EnhancedBigtableStubSettings build() { Preconditions.checkState(projectId != null, "Project id must be set"); diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java index 731ba7f77e..413bba34d4 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java @@ -634,6 +634,81 @@ public void checkAndMutateRowSettingsAreNotLostTest() { .isEqualTo(retrySettings); } + @Test + public void generateInitialChangeStreamPartitionsSettingsAreNotLostTest() { + String dummyProjectId = 
"my-project"; + String dummyInstanceId = "my-instance"; + + EnhancedBigtableStubSettings.Builder builder = + EnhancedBigtableStubSettings.newBuilder() + .setProjectId(dummyProjectId) + .setInstanceId(dummyInstanceId); + + RetrySettings retrySettings = RetrySettings.newBuilder().build(); + builder + .generateInitialChangeStreamPartitionsSettings() + .setRetryableCodes(Code.ABORTED, Code.DEADLINE_EXCEEDED) + .setRetrySettings(retrySettings) + .build(); + + assertThat(builder.generateInitialChangeStreamPartitionsSettings().getRetryableCodes()) + .containsAtLeast(Code.ABORTED, Code.DEADLINE_EXCEEDED); + assertThat(builder.generateInitialChangeStreamPartitionsSettings().getRetrySettings()) + .isEqualTo(retrySettings); + + assertThat(builder.build().generateInitialChangeStreamPartitionsSettings().getRetryableCodes()) + .containsAtLeast(Code.ABORTED, Code.DEADLINE_EXCEEDED); + assertThat(builder.build().generateInitialChangeStreamPartitionsSettings().getRetrySettings()) + .isEqualTo(retrySettings); + + assertThat( + builder + .build() + .toBuilder() + .generateInitialChangeStreamPartitionsSettings() + .getRetryableCodes()) + .containsAtLeast(Code.ABORTED, Code.DEADLINE_EXCEEDED); + assertThat( + builder + .build() + .toBuilder() + .generateInitialChangeStreamPartitionsSettings() + .getRetrySettings()) + .isEqualTo(retrySettings); + } + + @Test + public void readChangeStreamSettingsAreNotLostTest() { + String dummyProjectId = "my-project"; + String dummyInstanceId = "my-instance"; + + EnhancedBigtableStubSettings.Builder builder = + EnhancedBigtableStubSettings.newBuilder() + .setProjectId(dummyProjectId) + .setInstanceId(dummyInstanceId); + + RetrySettings retrySettings = RetrySettings.newBuilder().build(); + builder + .readChangeStreamSettings() + .setRetryableCodes(Code.ABORTED, Code.DEADLINE_EXCEEDED) + .setRetrySettings(retrySettings) + .build(); + + assertThat(builder.readChangeStreamSettings().getRetryableCodes()) + .containsAtLeast(Code.ABORTED, 
Code.DEADLINE_EXCEEDED); + assertThat(builder.readChangeStreamSettings().getRetrySettings()).isEqualTo(retrySettings); + + assertThat(builder.build().readChangeStreamSettings().getRetryableCodes()) + .containsAtLeast(Code.ABORTED, Code.DEADLINE_EXCEEDED); + assertThat(builder.build().readChangeStreamSettings().getRetrySettings()) + .isEqualTo(retrySettings); + + assertThat(builder.build().toBuilder().readChangeStreamSettings().getRetryableCodes()) + .containsAtLeast(Code.ABORTED, Code.DEADLINE_EXCEEDED); + assertThat(builder.build().toBuilder().readChangeStreamSettings().getRetrySettings()) + .isEqualTo(retrySettings); + } + @Test public void checkAndMutateRowSettingsAreSane() { UnaryCallSettings.Builder builder =