From ec3ea29efae1cf6567055d43219690b3d2db8b5e Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 27 Jun 2022 18:26:11 +0000 Subject: [PATCH] fix: Modify client lib retry policy for CreateWriteStream with longer backoff, more error code and longer overall time (#1679) - [ ] Regenerate this pull request now. PiperOrigin-RevId: 457061436 Source-Link: https://github.com/googleapis/googleapis/commit/8ff130bc81fa1d175e410d14a300caa18d5ebf80 Source-Link: https://github.com/googleapis/googleapis-gen/commit/2eb0faca717d9cf44b838b7db5e862451b8a86ef Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMmViMGZhY2E3MTdkOWNmNDRiODM4YjdkYjVlODYyNDUxYjhhODZlZiJ9 feat: add fields to eventually contain row level errors Committer: @gnanda PiperOrigin-RevId: 456324780 Source-Link: https://github.com/googleapis/googleapis/commit/f24b37a351260ddce8208edae50d637fa0b88d6b Source-Link: https://github.com/googleapis/googleapis-gen/commit/33f9d814082117116c4b68a6f5aac3f42bec35c2 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMzNmOWQ4MTQwODIxMTcxMTZjNGI2OGE2ZjVhYWMzZjQyYmVjMzVjMiJ9 --- .../storage/v1/BaseBigQueryReadClient.java | 14 +- .../v1/stub/BigQueryWriteStubSettings.java | 38 +- .../storage/v1/BigQueryWriteClientTest.java | 5 +- .../storage/v1/AppendRowsResponse.java | 546 ++++++++- .../v1/AppendRowsResponseOrBuilder.java | 62 + .../storage/v1/CreateReadSessionRequest.java | 40 +- .../v1/CreateReadSessionRequestOrBuilder.java | 10 +- .../cloud/bigquery/storage/v1/DataFormat.java | 20 +- .../bigquery/storage/v1/ReadSession.java | 14 +- .../storage/v1/ReadSessionOrBuilder.java | 4 +- .../cloud/bigquery/storage/v1/RowError.java | 1019 +++++++++++++++++ .../storage/v1/RowErrorOrBuilder.java | 88 ++ .../bigquery/storage/v1/StorageProto.java | 210 ++-- .../bigquery/storage/v1/TableSchema.java | 8 +- .../cloud/bigquery/storage/v1/storage.proto | 38 +- .../cloud/bigquery/storage/v1/stream.proto | 3 +- .../cloud/bigquery/storage/v1/table.proto | 4 +- 17 files changed, 1968 insertions(+), 155 deletions(-) create mode 100644 proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/RowError.java create mode 100644 proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/RowErrorOrBuilder.java diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClient.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClient.java index e89210e3d2..92044d9a4d 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClient.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClient.java @@ -188,9 +188,10 @@ public BigQueryReadStub getStub() { * @param maxStreamCount Max initial number of streams. If unset or zero, the server will provide * a value of streams so as to produce reasonable throughput. Must be non-negative. The number * of streams may be lower than the requested number, depending on the amount parallelism that - * is reasonable for the table. Error will be returned if the max count is greater than the - * current system max limit of 1,000. - *

Streams must be read starting from offset 0. + * is reasonable for the table. There is a default system max limit of 1,000. + *

This must be greater than or equal to preferred_min_stream_count. Typically, clients + * should either leave this unset to let the system to determine an upper bound OR set this a + * size for the maximum "units of work" it can gracefully handle. * @throws com.google.api.gax.rpc.ApiException if the remote call fails */ public final ReadSession createReadSession( @@ -243,9 +244,10 @@ public final ReadSession createReadSession( * @param maxStreamCount Max initial number of streams. If unset or zero, the server will provide * a value of streams so as to produce reasonable throughput. Must be non-negative. The number * of streams may be lower than the requested number, depending on the amount parallelism that - * is reasonable for the table. Error will be returned if the max count is greater than the - * current system max limit of 1,000. - *

Streams must be read starting from offset 0. + * is reasonable for the table. There is a default system max limit of 1,000. + *

This must be greater than or equal to preferred_min_stream_count. Typically, clients + * should either leave this unset to let the system to determine an upper bound OR set this a + * size for the maximum "units of work" it can gracefully handle. * @throws com.google.api.gax.rpc.ApiException if the remote call fails */ public final ReadSession createReadSession( diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java index 418a1ecfa4..e026fd68cd 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java @@ -243,13 +243,20 @@ public static class Builder extends StubSettings.Builder> definitions = ImmutableMap.builder(); definitions.put( - "retry_policy_4_codes", + "retry_policy_5_codes", ImmutableSet.copyOf( Lists.newArrayList( - StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE))); + StatusCode.Code.DEADLINE_EXCEEDED, + StatusCode.Code.UNAVAILABLE, + StatusCode.Code.RESOURCE_EXHAUSTED))); definitions.put( "retry_policy_3_codes", ImmutableSet.copyOf(Lists.newArrayList(StatusCode.Code.UNAVAILABLE))); + definitions.put( + "retry_policy_4_codes", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE))); RETRYABLE_CODE_DEFINITIONS = definitions.build(); } @@ -260,15 +267,15 @@ public static class Builder extends StubSettings.Builder()) + .build(); mockBigQueryWrite.addResponse(expectedResponse); AppendRowsRequest request = AppendRowsRequest.newBuilder() diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponse.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponse.java index 7c1ca2dedd..7b4c2c8c9c 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponse.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponse.java @@ -37,7 +37,9 @@ private AppendRowsResponse(com.google.protobuf.GeneratedMessageV3.Builder bui super(builder); } - private AppendRowsResponse() {} + private AppendRowsResponse() { + rowErrors_ = java.util.Collections.emptyList(); + } @java.lang.Override @SuppressWarnings({"unused"}) @@ -58,6 +60,7 @@ private AppendRowsResponse( if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } + int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -121,6 +124,18 @@ private AppendRowsResponse( break; } + case 34: + { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + rowErrors_ = + new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + rowErrors_.add( + input.readMessage( + com.google.cloud.bigquery.storage.v1.RowError.parser(), extensionRegistry)); + break; + } default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { @@ -137,6 +152,9 @@ private AppendRowsResponse( } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + 
rowErrors_ = java.util.Collections.unmodifiableList(rowErrors_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } @@ -1185,6 +1203,85 @@ public com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder getUpdatedSchem return getUpdatedSchema(); } + public static final int ROW_ERRORS_FIELD_NUMBER = 4; + private java.util.List rowErrors_; + /** + * + * + *

+   * If a request failed due to corrupted rows, no rows in the batch will be
+   * appended. The API will return row level error info, so that the caller can
+   * remove the bad rows and retry the request.
+   * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + @java.lang.Override + public java.util.List getRowErrorsList() { + return rowErrors_; + } + /** + * + * + *
+   * If a request failed due to corrupted rows, no rows in the batch will be
+   * appended. The API will return row level error info, so that the caller can
+   * remove the bad rows and retry the request.
+   * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + @java.lang.Override + public java.util.List + getRowErrorsOrBuilderList() { + return rowErrors_; + } + /** + * + * + *
+   * If a request failed due to corrupted rows, no rows in the batch will be
+   * appended. The API will return row level error info, so that the caller can
+   * remove the bad rows and retry the request.
+   * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + @java.lang.Override + public int getRowErrorsCount() { + return rowErrors_.size(); + } + /** + * + * + *
+   * If a request failed due to corrupted rows, no rows in the batch will be
+   * appended. The API will return row level error info, so that the caller can
+   * remove the bad rows and retry the request.
+   * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.RowError getRowErrors(int index) { + return rowErrors_.get(index); + } + /** + * + * + *
+   * If a request failed due to corrupted rows, no rows in the batch will be
+   * appended. The API will return row level error info, so that the caller can
+   * remove the bad rows and retry the request.
+   * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.RowErrorOrBuilder getRowErrorsOrBuilder(int index) { + return rowErrors_.get(index); + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -1209,6 +1306,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (updatedSchema_ != null) { output.writeMessage(3, getUpdatedSchema()); } + for (int i = 0; i < rowErrors_.size(); i++) { + output.writeMessage(4, rowErrors_.get(i)); + } unknownFields.writeTo(output); } @@ -1231,6 +1331,9 @@ public int getSerializedSize() { if (updatedSchema_ != null) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getUpdatedSchema()); } + for (int i = 0; i < rowErrors_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, rowErrors_.get(i)); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -1251,6 +1354,7 @@ public boolean equals(final java.lang.Object obj) { if (hasUpdatedSchema()) { if (!getUpdatedSchema().equals(other.getUpdatedSchema())) return false; } + if (!getRowErrorsList().equals(other.getRowErrorsList())) return false; if (!getResponseCase().equals(other.getResponseCase())) return false; switch (responseCase_) { case 1: @@ -1277,6 +1381,10 @@ public int hashCode() { hash = (37 * hash) + UPDATED_SCHEMA_FIELD_NUMBER; hash = (53 * hash) + getUpdatedSchema().hashCode(); } + if (getRowErrorsCount() > 0) { + hash = (37 * hash) + ROW_ERRORS_FIELD_NUMBER; + hash = (53 * hash) + getRowErrorsList().hashCode(); + } switch (responseCase_) { case 1: hash = (37 * hash) + APPEND_RESULT_FIELD_NUMBER; @@ -1429,7 +1537,9 @@ private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getRowErrorsFieldBuilder(); + } } @java.lang.Override @@ -1441,6 +1551,12 @@ public Builder clear() { updatedSchema_ = null; updatedSchemaBuilder_ = null; } + if (rowErrorsBuilder_ == null) { + rowErrors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + rowErrorsBuilder_.clear(); + } responseCase_ = 0; response_ = null; return this; @@ -1470,6 +1586,7 @@ public com.google.cloud.bigquery.storage.v1.AppendRowsResponse build() { public com.google.cloud.bigquery.storage.v1.AppendRowsResponse buildPartial() { com.google.cloud.bigquery.storage.v1.AppendRowsResponse result = new com.google.cloud.bigquery.storage.v1.AppendRowsResponse(this); + int from_bitField0_ = bitField0_; if (responseCase_ == 1) { if (appendResultBuilder_ == null) { result.response_ = response_; @@ -1489,6 +1606,15 @@ public com.google.cloud.bigquery.storage.v1.AppendRowsResponse buildPartial() { } else { result.updatedSchema_ = updatedSchemaBuilder_.build(); } + if (rowErrorsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + rowErrors_ = java.util.Collections.unmodifiableList(rowErrors_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.rowErrors_ = rowErrors_; + } else { + result.rowErrors_ = rowErrorsBuilder_.build(); + } result.responseCase_ = responseCase_; onBuilt(); return result; @@ -1543,6 +1669,33 @@ public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.AppendRowsResponse if (other.hasUpdatedSchema()) { mergeUpdatedSchema(other.getUpdatedSchema()); } + if 
(rowErrorsBuilder_ == null) { + if (!other.rowErrors_.isEmpty()) { + if (rowErrors_.isEmpty()) { + rowErrors_ = other.rowErrors_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureRowErrorsIsMutable(); + rowErrors_.addAll(other.rowErrors_); + } + onChanged(); + } + } else { + if (!other.rowErrors_.isEmpty()) { + if (rowErrorsBuilder_.isEmpty()) { + rowErrorsBuilder_.dispose(); + rowErrorsBuilder_ = null; + rowErrors_ = other.rowErrors_; + bitField0_ = (bitField0_ & ~0x00000001); + rowErrorsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getRowErrorsFieldBuilder() + : null; + } else { + rowErrorsBuilder_.addAllMessages(other.rowErrors_); + } + } + } switch (other.getResponseCase()) { case APPEND_RESULT: { @@ -1603,6 +1756,8 @@ public Builder clearResponse() { return this; } + private int bitField0_; + private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult, com.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.Builder, @@ -2357,6 +2512,393 @@ public com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder getUpdatedSchem return updatedSchemaBuilder_; } + private java.util.List rowErrors_ = + java.util.Collections.emptyList(); + + private void ensureRowErrorsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + rowErrors_ = + new java.util.ArrayList(rowErrors_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.RowError, + com.google.cloud.bigquery.storage.v1.RowError.Builder, + com.google.cloud.bigquery.storage.v1.RowErrorOrBuilder> + rowErrorsBuilder_; + + /** + * + * + *
+     * If a request failed due to corrupted rows, no rows in the batch will be
+     * appended. The API will return row level error info, so that the caller can
+     * remove the bad rows and retry the request.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public java.util.List getRowErrorsList() { + if (rowErrorsBuilder_ == null) { + return java.util.Collections.unmodifiableList(rowErrors_); + } else { + return rowErrorsBuilder_.getMessageList(); + } + } + /** + * + * + *
+     * If a request failed due to corrupted rows, no rows in the batch will be
+     * appended. The API will return row level error info, so that the caller can
+     * remove the bad rows and retry the request.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public int getRowErrorsCount() { + if (rowErrorsBuilder_ == null) { + return rowErrors_.size(); + } else { + return rowErrorsBuilder_.getCount(); + } + } + /** + * + * + *
+     * If a request failed due to corrupted rows, no rows in the batch will be
+     * appended. The API will return row level error info, so that the caller can
+     * remove the bad rows and retry the request.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public com.google.cloud.bigquery.storage.v1.RowError getRowErrors(int index) { + if (rowErrorsBuilder_ == null) { + return rowErrors_.get(index); + } else { + return rowErrorsBuilder_.getMessage(index); + } + } + /** + * + * + *
+     * If a request failed due to corrupted rows, no rows in the batch will be
+     * appended. The API will return row level error info, so that the caller can
+     * remove the bad rows and retry the request.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public Builder setRowErrors(int index, com.google.cloud.bigquery.storage.v1.RowError value) { + if (rowErrorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRowErrorsIsMutable(); + rowErrors_.set(index, value); + onChanged(); + } else { + rowErrorsBuilder_.setMessage(index, value); + } + return this; + } + /** + * + * + *
+     * If a request failed due to corrupted rows, no rows in the batch will be
+     * appended. The API will return row level error info, so that the caller can
+     * remove the bad rows and retry the request.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public Builder setRowErrors( + int index, com.google.cloud.bigquery.storage.v1.RowError.Builder builderForValue) { + if (rowErrorsBuilder_ == null) { + ensureRowErrorsIsMutable(); + rowErrors_.set(index, builderForValue.build()); + onChanged(); + } else { + rowErrorsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * If a request failed due to corrupted rows, no rows in the batch will be
+     * appended. The API will return row level error info, so that the caller can
+     * remove the bad rows and retry the request.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public Builder addRowErrors(com.google.cloud.bigquery.storage.v1.RowError value) { + if (rowErrorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRowErrorsIsMutable(); + rowErrors_.add(value); + onChanged(); + } else { + rowErrorsBuilder_.addMessage(value); + } + return this; + } + /** + * + * + *
+     * If a request failed due to corrupted rows, no rows in the batch will be
+     * appended. The API will return row level error info, so that the caller can
+     * remove the bad rows and retry the request.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public Builder addRowErrors(int index, com.google.cloud.bigquery.storage.v1.RowError value) { + if (rowErrorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRowErrorsIsMutable(); + rowErrors_.add(index, value); + onChanged(); + } else { + rowErrorsBuilder_.addMessage(index, value); + } + return this; + } + /** + * + * + *
+     * If a request failed due to corrupted rows, no rows in the batch will be
+     * appended. The API will return row level error info, so that the caller can
+     * remove the bad rows and retry the request.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public Builder addRowErrors( + com.google.cloud.bigquery.storage.v1.RowError.Builder builderForValue) { + if (rowErrorsBuilder_ == null) { + ensureRowErrorsIsMutable(); + rowErrors_.add(builderForValue.build()); + onChanged(); + } else { + rowErrorsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * If a request failed due to corrupted rows, no rows in the batch will be
+     * appended. The API will return row level error info, so that the caller can
+     * remove the bad rows and retry the request.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public Builder addRowErrors( + int index, com.google.cloud.bigquery.storage.v1.RowError.Builder builderForValue) { + if (rowErrorsBuilder_ == null) { + ensureRowErrorsIsMutable(); + rowErrors_.add(index, builderForValue.build()); + onChanged(); + } else { + rowErrorsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * If a request failed due to corrupted rows, no rows in the batch will be
+     * appended. The API will return row level error info, so that the caller can
+     * remove the bad rows and retry the request.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public Builder addAllRowErrors( + java.lang.Iterable values) { + if (rowErrorsBuilder_ == null) { + ensureRowErrorsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, rowErrors_); + onChanged(); + } else { + rowErrorsBuilder_.addAllMessages(values); + } + return this; + } + /** + * + * + *
+     * If a request failed due to corrupted rows, no rows in the batch will be
+     * appended. The API will return row level error info, so that the caller can
+     * remove the bad rows and retry the request.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public Builder clearRowErrors() { + if (rowErrorsBuilder_ == null) { + rowErrors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + rowErrorsBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * If a request failed due to corrupted rows, no rows in the batch will be
+     * appended. The API will return row level error info, so that the caller can
+     * remove the bad rows and retry the request.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public Builder removeRowErrors(int index) { + if (rowErrorsBuilder_ == null) { + ensureRowErrorsIsMutable(); + rowErrors_.remove(index); + onChanged(); + } else { + rowErrorsBuilder_.remove(index); + } + return this; + } + /** + * + * + *
+     * If a request failed due to corrupted rows, no rows in the batch will be
+     * appended. The API will return row level error info, so that the caller can
+     * remove the bad rows and retry the request.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public com.google.cloud.bigquery.storage.v1.RowError.Builder getRowErrorsBuilder(int index) { + return getRowErrorsFieldBuilder().getBuilder(index); + } + /** + * + * + *
+     * If a request failed due to corrupted rows, no rows in the batch will be
+     * appended. The API will return row level error info, so that the caller can
+     * remove the bad rows and retry the request.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public com.google.cloud.bigquery.storage.v1.RowErrorOrBuilder getRowErrorsOrBuilder(int index) { + if (rowErrorsBuilder_ == null) { + return rowErrors_.get(index); + } else { + return rowErrorsBuilder_.getMessageOrBuilder(index); + } + } + /** + * + * + *
+     * If a request failed due to corrupted rows, no rows in the batch will be
+     * appended. The API will return row level error info, so that the caller can
+     * remove the bad rows and retry the request.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public java.util.List + getRowErrorsOrBuilderList() { + if (rowErrorsBuilder_ != null) { + return rowErrorsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(rowErrors_); + } + } + /** + * + * + *
+     * If a request failed due to corrupted rows, no rows in the batch will be
+     * appended. The API will return row level error info, so that the caller can
+     * remove the bad rows and retry the request.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public com.google.cloud.bigquery.storage.v1.RowError.Builder addRowErrorsBuilder() { + return getRowErrorsFieldBuilder() + .addBuilder(com.google.cloud.bigquery.storage.v1.RowError.getDefaultInstance()); + } + /** + * + * + *
+     * If a request failed due to corrupted rows, no rows in the batch will be
+     * appended. The API will return row level error info, so that the caller can
+     * remove the bad rows and retry the request.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public com.google.cloud.bigquery.storage.v1.RowError.Builder addRowErrorsBuilder(int index) { + return getRowErrorsFieldBuilder() + .addBuilder(index, com.google.cloud.bigquery.storage.v1.RowError.getDefaultInstance()); + } + /** + * + * + *
+     * If a request failed due to corrupted rows, no rows in the batch will be
+     * appended. The API will return row level error info, so that the caller can
+     * remove the bad rows and retry the request.
+     * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + public java.util.List + getRowErrorsBuilderList() { + return getRowErrorsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.RowError, + com.google.cloud.bigquery.storage.v1.RowError.Builder, + com.google.cloud.bigquery.storage.v1.RowErrorOrBuilder> + getRowErrorsFieldBuilder() { + if (rowErrorsBuilder_ == null) { + rowErrorsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1.RowError, + com.google.cloud.bigquery.storage.v1.RowError.Builder, + com.google.cloud.bigquery.storage.v1.RowErrorOrBuilder>( + rowErrors_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + rowErrors_ = null; + } + return rowErrorsBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponseOrBuilder.java index 69a81948c0..0226d78642 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponseOrBuilder.java @@ -177,5 +177,67 @@ public interface AppendRowsResponseOrBuilder */ com.google.cloud.bigquery.storage.v1.TableSchemaOrBuilder getUpdatedSchemaOrBuilder(); + /** + * + * + *
+   * If a request failed due to corrupted rows, no rows in the batch will be
+   * appended. The API will return row level error info, so that the caller can
+   * remove the bad rows and retry the request.
+   * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + java.util.List getRowErrorsList(); + /** + * + * + *
+   * If a request failed due to corrupted rows, no rows in the batch will be
+   * appended. The API will return row level error info, so that the caller can
+   * remove the bad rows and retry the request.
+   * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + com.google.cloud.bigquery.storage.v1.RowError getRowErrors(int index); + /** + * + * + *
+   * If a request failed due to corrupted rows, no rows in the batch will be
+   * appended. The API will return row level error info, so that the caller can
+   * remove the bad rows and retry the request.
+   * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + int getRowErrorsCount(); + /** + * + * + *
+   * If a request failed due to corrupted rows, no rows in the batch will be
+   * appended. The API will return row level error info, so that the caller can
+   * remove the bad rows and retry the request.
+   * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + java.util.List + getRowErrorsOrBuilderList(); + /** + * + * + *
+   * If a request failed due to corrupted rows, no rows in the batch will be
+   * appended. The API will return row level error info, so that the caller can
+   * remove the bad rows and retry the request.
+   * </pre>
+ * + * repeated .google.cloud.bigquery.storage.v1.RowError row_errors = 4; + */ + com.google.cloud.bigquery.storage.v1.RowErrorOrBuilder getRowErrorsOrBuilder(int index); + public com.google.cloud.bigquery.storage.v1.AppendRowsResponse.ResponseCase getResponseCase(); } diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequest.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequest.java index 25200ad887..4727bcb074 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequest.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequest.java @@ -252,10 +252,12 @@ public com.google.cloud.bigquery.storage.v1.ReadSessionOrBuilder getReadSessionO * Max initial number of streams. If unset or zero, the server will * provide a value of streams so as to produce reasonable throughput. Must be * non-negative. The number of streams may be lower than the requested number, - * depending on the amount parallelism that is reasonable for the table. Error - * will be returned if the max count is greater than the current system - * max limit of 1,000. - * Streams must be read starting from offset 0. + * depending on the amount parallelism that is reasonable for the table. + * There is a default system max limit of 1,000. + * This must be greater than or equal to preferred_min_stream_count. + * Typically, clients should either leave this unset to let the system to + * determine an upper bound OR set this a size for the maximum "units of work" + * it can gracefully handle. * * * int32 max_stream_count = 3; @@ -965,10 +967,12 @@ public com.google.cloud.bigquery.storage.v1.ReadSessionOrBuilder getReadSessionO * Max initial number of streams. If unset or zero, the server will * provide a value of streams so as to produce reasonable throughput. Must be * non-negative. The number of streams may be lower than the requested number, - * depending on the amount parallelism that is reasonable for the table. Error - * will be returned if the max count is greater than the current system - * max limit of 1,000. - * Streams must be read starting from offset 0. + * depending on the amount parallelism that is reasonable for the table. + * There is a default system max limit of 1,000. + * This must be greater than or equal to preferred_min_stream_count. + * Typically, clients should either leave this unset to let the system to + * determine an upper bound OR set this a size for the maximum "units of work" + * it can gracefully handle. * * * int32 max_stream_count = 3; @@ -986,10 +990,12 @@ public int getMaxStreamCount() { * Max initial number of streams. If unset or zero, the server will * provide a value of streams so as to produce reasonable throughput. Must be * non-negative. The number of streams may be lower than the requested number, - * depending on the amount parallelism that is reasonable for the table. Error - * will be returned if the max count is greater than the current system - * max limit of 1,000. - * Streams must be read starting from offset 0. + * depending on the amount parallelism that is reasonable for the table. + * There is a default system max limit of 1,000. + * This must be greater than or equal to preferred_min_stream_count. 
+ * Typically, clients should either leave this unset to let the system to + * determine an upper bound OR set this a size for the maximum "units of work" + * it can gracefully handle. * * * int32 max_stream_count = 3; @@ -1010,10 +1016,12 @@ public Builder setMaxStreamCount(int value) { * Max initial number of streams. If unset or zero, the server will * provide a value of streams so as to produce reasonable throughput. Must be * non-negative. The number of streams may be lower than the requested number, - * depending on the amount parallelism that is reasonable for the table. Error - * will be returned if the max count is greater than the current system - * max limit of 1,000. - * Streams must be read starting from offset 0. + * depending on the amount parallelism that is reasonable for the table. + * There is a default system max limit of 1,000. + * This must be greater than or equal to preferred_min_stream_count. + * Typically, clients should either leave this unset to let the system to + * determine an upper bound OR set this a size for the maximum "units of work" + * it can gracefully handle. * * * int32 max_stream_count = 3; diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequestOrBuilder.java index d522222bc1..39836f0e4a 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequestOrBuilder.java @@ -102,10 +102,12 @@ public interface CreateReadSessionRequestOrBuilder * Max initial number of streams. If unset or zero, the server will * provide a value of streams so as to produce reasonable throughput. Must be * non-negative. The number of streams may be lower than the requested number, - * depending on the amount parallelism that is reasonable for the table. Error - * will be returned if the max count is greater than the current system - * max limit of 1,000. - * Streams must be read starting from offset 0. + * depending on the amount parallelism that is reasonable for the table. + * There is a default system max limit of 1,000. + * This must be greater than or equal to preferred_min_stream_count. + * Typically, clients should either leave this unset to let the system to + * determine an upper bound OR set this a size for the maximum "units of work" + * it can gracefully handle. * * * int32 max_stream_count = 3; diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/DataFormat.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/DataFormat.java index 7829837394..d5965f5cce 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/DataFormat.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/DataFormat.java @@ -28,7 +28,15 @@ * Protobuf enum {@code google.cloud.bigquery.storage.v1.DataFormat} */ public enum DataFormat implements com.google.protobuf.ProtocolMessageEnum { - /** DATA_FORMAT_UNSPECIFIED = 0; */ + /** + * + * + *
+   * Data format is unspecified.
+   * </pre>
+ * + * DATA_FORMAT_UNSPECIFIED = 0; + */ DATA_FORMAT_UNSPECIFIED(0), /** * @@ -55,7 +63,15 @@ public enum DataFormat implements com.google.protobuf.ProtocolMessageEnum { UNRECOGNIZED(-1), ; - /** DATA_FORMAT_UNSPECIFIED = 0; */ + /** + * + * + *
+   * Data format is unspecified.
+   * </pre>
+ * + * DATA_FORMAT_UNSPECIFIED = 0; + */ public static final int DATA_FORMAT_UNSPECIFIED_VALUE = 0; /** * diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java index ea60388fe9..fe46f2f551 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java @@ -2769,7 +2769,7 @@ public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { * * *
-   * Immutable. Data format of the output data.
+   * Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not supported.
    * </pre>
* * @@ -2786,7 +2786,7 @@ public int getDataFormatValue() { * * *
-   * Immutable. Data format of the output data.
+   * Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not supported.
    * </pre>
* * @@ -4204,7 +4204,7 @@ public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { * * *
-     * Immutable. Data format of the output data.
+     * Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not supported.
      * </pre>
* * @@ -4221,7 +4221,7 @@ public int getDataFormatValue() { * * *
-     * Immutable. Data format of the output data.
+     * Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not supported.
      * </pre>
* * @@ -4241,7 +4241,7 @@ public Builder setDataFormatValue(int value) { * * *
-     * Immutable. Data format of the output data.
+     * Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not supported.
      * </pre>
* * @@ -4261,7 +4261,7 @@ public com.google.cloud.bigquery.storage.v1.DataFormat getDataFormat() { * * *
-     * Immutable. Data format of the output data.
+     * Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not supported.
      * </pre>
* * @@ -4284,7 +4284,7 @@ public Builder setDataFormat(com.google.cloud.bigquery.storage.v1.DataFormat val * * *
-     * Immutable. Data format of the output data.
+     * Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not supported.
      * </pre>
* * diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java index 007df97171..58569f3706 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java @@ -98,7 +98,7 @@ public interface ReadSessionOrBuilder * * *
-   * Immutable. Data format of the output data.
+   * Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not supported.
    * </pre>
* * @@ -112,7 +112,7 @@ public interface ReadSessionOrBuilder * * *
-   * Immutable. Data format of the output data.
+   * Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not supported.
    * </pre>
* * diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/RowError.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/RowError.java new file mode 100644 index 0000000000..db6511924c --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/RowError.java @@ -0,0 +1,1019 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +/** + * + * + *
+ * The message that presents row level error info in a request.
+ * </pre>
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.RowError} + */ +public final class RowError extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1.RowError) + RowErrorOrBuilder { + private static final long serialVersionUID = 0L; + // Use RowError.newBuilder() to construct. + private RowError(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private RowError() { + code_ = 0; + message_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new RowError(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private RowError( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + index_ = input.readInt64(); + break; + } + case 16: + { + int rawValue = input.readEnum(); + + code_ = rawValue; + break; + } + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + + message_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_RowError_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_RowError_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.RowError.class, + com.google.cloud.bigquery.storage.v1.RowError.Builder.class); + } + + /** + * + * + *
+   * Error code for `RowError`.
+   * </pre>
+ * + * Protobuf enum {@code google.cloud.bigquery.storage.v1.RowError.RowErrorCode} + */ + public enum RowErrorCode implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * Default error.
+     * </pre>
+ * + * ROW_ERROR_CODE_UNSPECIFIED = 0; + */ + ROW_ERROR_CODE_UNSPECIFIED(0), + /** + * + * + *
+     * One or more fields in the row have errors.
+     * </pre>
+ * + * FIELDS_ERROR = 1; + */ + FIELDS_ERROR(1), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * Default error.
+     * </pre>
+ * + * ROW_ERROR_CODE_UNSPECIFIED = 0; + */ + public static final int ROW_ERROR_CODE_UNSPECIFIED_VALUE = 0; + /** + * + * + *
+     * One or more fields in the row have errors.
+     * </pre>
+ * + * FIELDS_ERROR = 1; + */ + public static final int FIELDS_ERROR_VALUE = 1; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static RowErrorCode valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static RowErrorCode forNumber(int value) { + switch (value) { + case 0: + return ROW_ERROR_CODE_UNSPECIFIED; + case 1: + return FIELDS_ERROR; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public RowErrorCode findValueByNumber(int number) { + return RowErrorCode.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.RowError.getDescriptor().getEnumTypes().get(0); + } + + private static final RowErrorCode[] VALUES = values(); + + public static RowErrorCode valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private RowErrorCode(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1.RowError.RowErrorCode) + } + + public static final int INDEX_FIELD_NUMBER = 1; + private long index_; + /** + * + * + *
+   * Index of the malformed row in the request.
+   * </pre>
+ * + * int64 index = 1; + * + * @return The index. + */ + @java.lang.Override + public long getIndex() { + return index_; + } + + public static final int CODE_FIELD_NUMBER = 2; + private int code_; + /** + * + * + *
+   * Structured error reason for a row error.
+   * </pre>
+ * + * .google.cloud.bigquery.storage.v1.RowError.RowErrorCode code = 2; + * + * @return The enum numeric value on the wire for code. + */ + @java.lang.Override + public int getCodeValue() { + return code_; + } + /** + * + * + *
+   * Structured error reason for a row error.
+   * </pre>
+ * + * .google.cloud.bigquery.storage.v1.RowError.RowErrorCode code = 2; + * + * @return The code. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode getCode() { + @SuppressWarnings("deprecation") + com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode result = + com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode.valueOf(code_); + return result == null + ? com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode.UNRECOGNIZED + : result; + } + + public static final int MESSAGE_FIELD_NUMBER = 3; + private volatile java.lang.Object message_; + /** + * + * + *
+   * Description of the issue encountered when processing the row.
+   * </pre>
+ * + * string message = 3; + * + * @return The message. + */ + @java.lang.Override + public java.lang.String getMessage() { + java.lang.Object ref = message_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + message_ = s; + return s; + } + } + /** + * + * + *
+   * Description of the issue encountered when processing the row.
+   * </pre>
+ * + * string message = 3; + * + * @return The bytes for message. + */ + @java.lang.Override + public com.google.protobuf.ByteString getMessageBytes() { + java.lang.Object ref = message_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + message_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (index_ != 0L) { + output.writeInt64(1, index_); + } + if (code_ + != com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode.ROW_ERROR_CODE_UNSPECIFIED + .getNumber()) { + output.writeEnum(2, code_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(message_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, message_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (index_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, index_); + } + if (code_ + != com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode.ROW_ERROR_CODE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, code_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(message_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, message_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1.RowError)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1.RowError other = + (com.google.cloud.bigquery.storage.v1.RowError) obj; + + if (getIndex() != other.getIndex()) return false; + if (code_ != other.code_) return false; + if (!getMessage().equals(other.getMessage())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + INDEX_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getIndex()); + hash = (37 * hash) + CODE_FIELD_NUMBER; + hash = (53 * hash) + code_; + hash = (37 * hash) + MESSAGE_FIELD_NUMBER; + hash = (53 * hash) + getMessage().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1.RowError parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.RowError parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.bigquery.storage.v1.RowError parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.RowError parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.RowError parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1.RowError parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.RowError parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.RowError parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.RowError parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.RowError parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1.RowError parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1.RowError parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1.RowError prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * The message that presents row level error info in a request.
+   * </pre>
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1.RowError} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1.RowError) + com.google.cloud.bigquery.storage.v1.RowErrorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_RowError_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_RowError_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1.RowError.class, + com.google.cloud.bigquery.storage.v1.RowError.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1.RowError.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + index_ = 0L; + + code_ = 0; + + message_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1.StorageProto + .internal_static_google_cloud_bigquery_storage_v1_RowError_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.RowError getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1.RowError.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.RowError build() { + com.google.cloud.bigquery.storage.v1.RowError result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.RowError buildPartial() { + com.google.cloud.bigquery.storage.v1.RowError result = + new com.google.cloud.bigquery.storage.v1.RowError(this); + result.index_ = index_; + result.code_ = code_; + result.message_ = message_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder 
mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1.RowError) { + return mergeFrom((com.google.cloud.bigquery.storage.v1.RowError) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.RowError other) { + if (other == com.google.cloud.bigquery.storage.v1.RowError.getDefaultInstance()) return this; + if (other.getIndex() != 0L) { + setIndex(other.getIndex()); + } + if (other.code_ != 0) { + setCodeValue(other.getCodeValue()); + } + if (!other.getMessage().isEmpty()) { + message_ = other.message_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1.RowError parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.google.cloud.bigquery.storage.v1.RowError) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private long index_; + /** + * + * + *
+     * Index of the malformed row in the request.
+     * 
+ * + * int64 index = 1; + * + * @return The index. + */ + @java.lang.Override + public long getIndex() { + return index_; + } + /** + * + * + *
+     * Index of the malformed row in the request.
+     * 
+ * + * int64 index = 1; + * + * @param value The index to set. + * @return This builder for chaining. + */ + public Builder setIndex(long value) { + + index_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Index of the malformed row in the request.
+     * 
+ * + * int64 index = 1; + * + * @return This builder for chaining. + */ + public Builder clearIndex() { + + index_ = 0L; + onChanged(); + return this; + } + + private int code_ = 0; + /** + * + * + *
+     * Structured error reason for a row error.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.RowError.RowErrorCode code = 2; + * + * @return The enum numeric value on the wire for code. + */ + @java.lang.Override + public int getCodeValue() { + return code_; + } + /** + * + * + *
+     * Structured error reason for a row error.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.RowError.RowErrorCode code = 2; + * + * @param value The enum numeric value on the wire for code to set. + * @return This builder for chaining. + */ + public Builder setCodeValue(int value) { + + code_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Structured error reason for a row error.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.RowError.RowErrorCode code = 2; + * + * @return The code. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode getCode() { + @SuppressWarnings("deprecation") + com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode result = + com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode.valueOf(code_); + return result == null + ? com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode.UNRECOGNIZED + : result; + } + /** + * + * + *
+     * Structured error reason for a row error.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.RowError.RowErrorCode code = 2; + * + * @param value The code to set. + * @return This builder for chaining. + */ + public Builder setCode(com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode value) { + if (value == null) { + throw new NullPointerException(); + } + + code_ = value.getNumber(); + onChanged(); + return this; + } + /** + * + * + *
+     * Structured error reason for a row error.
+     * 
+ * + * .google.cloud.bigquery.storage.v1.RowError.RowErrorCode code = 2; + * + * @return This builder for chaining. + */ + public Builder clearCode() { + + code_ = 0; + onChanged(); + return this; + } + + private java.lang.Object message_ = ""; + /** + * + * + *
+     * Description of the issue encountered when processing the row.
+     * 
+ * + * string message = 3; + * + * @return The message. + */ + public java.lang.String getMessage() { + java.lang.Object ref = message_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + message_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Description of the issue encountered when processing the row.
+     * 
+ * + * string message = 3; + * + * @return The bytes for message. + */ + public com.google.protobuf.ByteString getMessageBytes() { + java.lang.Object ref = message_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + message_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Description of the issue encountered when processing the row.
+     * 
+ * + * string message = 3; + * + * @param value The message to set. + * @return This builder for chaining. + */ + public Builder setMessage(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + message_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Description of the issue encountered when processing the row.
+     * 
+ * + * string message = 3; + * + * @return This builder for chaining. + */ + public Builder clearMessage() { + + message_ = getDefaultInstance().getMessage(); + onChanged(); + return this; + } + /** + * + * + *
+     * Description of the issue encountered when processing the row.
+     * 
+ * + * string message = 3; + * + * @param value The bytes for message to set. + * @return This builder for chaining. + */ + public Builder setMessageBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + message_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1.RowError) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1.RowError) + private static final com.google.cloud.bigquery.storage.v1.RowError DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1.RowError(); + } + + public static com.google.cloud.bigquery.storage.v1.RowError getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RowError parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RowError(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1.RowError getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/RowErrorOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/RowErrorOrBuilder.java new file mode 100644 index 0000000000..107ef08f15 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/RowErrorOrBuilder.java @@ -0,0 +1,88 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1/storage.proto + +package com.google.cloud.bigquery.storage.v1; + +public interface RowErrorOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1.RowError) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Index of the malformed row in the request.
+   * 
+ * + * int64 index = 1; + * + * @return The index. + */ + long getIndex(); + + /** + * + * + *
+   * Structured error reason for a row error.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.RowError.RowErrorCode code = 2; + * + * @return The enum numeric value on the wire for code. + */ + int getCodeValue(); + /** + * + * + *
+   * Structured error reason for a row error.
+   * 
+ * + * .google.cloud.bigquery.storage.v1.RowError.RowErrorCode code = 2; + * + * @return The code. + */ + com.google.cloud.bigquery.storage.v1.RowError.RowErrorCode getCode(); + + /** + * + * + *
+   * Description of the issue encountered when processing the row.
+   * 
+ * + * string message = 3; + * + * @return The message. + */ + java.lang.String getMessage(); + /** + * + * + *
+   * Description of the issue encountered when processing the row.
+   * 
+ * + * string message = 3; + * + * @return The bytes for message. + */ + com.google.protobuf.ByteString getMessageBytes(); +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageProto.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageProto.java index dd10f98b11..3c8e5fa627 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageProto.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageProto.java @@ -111,6 +111,10 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_cloud_bigquery_storage_v1_StorageError_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_cloud_bigquery_storage_v1_StorageError_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1_RowError_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1_RowError_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; @@ -178,106 +182,112 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "otoData\022D\n\rwriter_schema\030\001 \001(\0132-.google." + "cloud.bigquery.storage.v1.ProtoSchema\0229\n" + "\004rows\030\002 \001(\0132+.google.cloud.bigquery.stor" - + "age.v1.ProtoRowsB\006\n\004rows\"\245\002\n\022AppendRowsR" + + "age.v1.ProtoRowsB\006\n\004rows\"\345\002\n\022AppendRowsR" + "esponse\022Z\n\rappend_result\030\001 \001(\0132A.google." + "cloud.bigquery.storage.v1.AppendRowsResp" + "onse.AppendResultH\000\022#\n\005error\030\002 \001(\0132\022.goo" + "gle.rpc.StatusH\000\022E\n\016updated_schema\030\003 \001(\013" + "2-.google.cloud.bigquery.storage.v1.Tabl" - + "eSchema\032;\n\014AppendResult\022+\n\006offset\030\001 \001(\0132" - + "\033.google.protobuf.Int64ValueB\n\n\010response" - + "\"Y\n\025GetWriteStreamRequest\022@\n\004name\030\001 \001(\tB" - + "2\340A\002\372A,\n*bigquerystorage.googleapis.com/" - + "WriteStream\"s\n\036BatchCommitWriteStreamsRe" - + "quest\0225\n\006parent\030\001 \001(\tB%\340A\002\372A\037\n\035bigquery." - + "googleapis.com/Table\022\032\n\rwrite_streams\030\002 " - + "\003(\tB\003\340A\002\"\231\001\n\037BatchCommitWriteStreamsResp" - + "onse\022/\n\013commit_time\030\001 \001(\0132\032.google.proto" - + "buf.Timestamp\022E\n\rstream_errors\030\002 \003(\0132..g" - + "oogle.cloud.bigquery.storage.v1.StorageE" - + "rror\"^\n\032FinalizeWriteStreamRequest\022@\n\004na" - + "me\030\001 \001(\tB2\340A\002\372A,\n*bigquerystorage.google" - + "apis.com/WriteStream\"0\n\033FinalizeWriteStr" - + "eamResponse\022\021\n\trow_count\030\001 \001(\003\"\211\001\n\020Flush" - + "RowsRequest\022H\n\014write_stream\030\001 \001(\tB2\340A\002\372A" - + ",\n*bigquerystorage.googleapis.com/WriteS" - + "tream\022+\n\006offset\030\002 \001(\0132\033.google.protobuf." - + "Int64Value\"#\n\021FlushRowsResponse\022\016\n\006offse" - + "t\030\001 \001(\003\"\245\003\n\014StorageError\022M\n\004code\030\001 \001(\0162?" 
- + ".google.cloud.bigquery.storage.v1.Storag" - + "eError.StorageErrorCode\022\016\n\006entity\030\002 \001(\t\022" - + "\025\n\rerror_message\030\003 \001(\t\"\236\002\n\020StorageErrorC" - + "ode\022\"\n\036STORAGE_ERROR_CODE_UNSPECIFIED\020\000\022" - + "\023\n\017TABLE_NOT_FOUND\020\001\022\034\n\030STREAM_ALREADY_C" - + "OMMITTED\020\002\022\024\n\020STREAM_NOT_FOUND\020\003\022\027\n\023INVA" - + "LID_STREAM_TYPE\020\004\022\030\n\024INVALID_STREAM_STAT" - + "E\020\005\022\024\n\020STREAM_FINALIZED\020\006\022 \n\034SCHEMA_MISM" - + "ATCH_EXTRA_FIELDS\020\007\022\031\n\025OFFSET_ALREADY_EX" - + "ISTS\020\010\022\027\n\023OFFSET_OUT_OF_RANGE\020\t2\222\006\n\014BigQ" - + "ueryRead\022\351\001\n\021CreateReadSession\022:.google." - + "cloud.bigquery.storage.v1.CreateReadSess" - + "ionRequest\032-.google.cloud.bigquery.stora" - + "ge.v1.ReadSession\"i\202\323\344\223\002<\"7/v1/{read_ses" - + "sion.table=projects/*/datasets/*/tables/" - + "*}:\001*\332A$parent,read_session,max_stream_c" - + "ount\022\317\001\n\010ReadRows\0221.google.cloud.bigquer" - + "y.storage.v1.ReadRowsRequest\0322.google.cl" - + "oud.bigquery.storage.v1.ReadRowsResponse" - + "\"Z\202\323\344\223\002?\022=/v1/{read_stream=projects/*/lo" - + "cations/*/sessions/*/streams/*}\332A\022read_s" - + "tream,offset0\001\022\306\001\n\017SplitReadStream\0228.goo" - + "gle.cloud.bigquery.storage.v1.SplitReadS" - + "treamRequest\0329.google.cloud.bigquery.sto" - + "rage.v1.SplitReadStreamResponse\">\202\323\344\223\0028\022" - + "6/v1/{name=projects/*/locations/*/sessio" - + "ns/*/streams/*}\032{\312A\036bigquerystorage.goog" - + "leapis.com\322AWhttps://www.googleapis.com/" - + "auth/bigquery,https://www.googleapis.com" - + "/auth/cloud-platform2\274\013\n\rBigQueryWrite\022\327" - + "\001\n\021CreateWriteStream\022:.google.cloud.bigq" - + "uery.storage.v1.CreateWriteStreamRequest" - + "\032-.google.cloud.bigquery.storage.v1.Writ" - + "eStream\"W\202\323\344\223\002;\"+/v1/{parent=projects/*/" - + "datasets/*/tables/*}:\014write_stream\332A\023par" - + "ent,write_stream\022\322\001\n\nAppendRows\0223.google" - + ".cloud.bigquery.storage.v1.AppendRowsReq" - + "uest\0324.google.cloud.bigquery.storage.v1." - + "AppendRowsResponse\"U\202\323\344\223\002@\";/v1/{write_s" - + "tream=projects/*/datasets/*/tables/*/str" - + "eams/*}:\001*\332A\014write_stream(\0010\001\022\277\001\n\016GetWri" - + "teStream\0227.google.cloud.bigquery.storage" - + ".v1.GetWriteStreamRequest\032-.google.cloud" - + ".bigquery.storage.v1.WriteStream\"E\202\323\344\223\0028" - + "\"3/v1/{name=projects/*/datasets/*/tables" - + "/*/streams/*}:\001*\332A\004name\022\331\001\n\023FinalizeWrit" - + "eStream\022<.google.cloud.bigquery.storage." 
- + "v1.FinalizeWriteStreamRequest\032=.google.c" - + "loud.bigquery.storage.v1.FinalizeWriteSt" - + "reamResponse\"E\202\323\344\223\0028\"3/v1/{name=projects" - + "/*/datasets/*/tables/*/streams/*}:\001*\332A\004n" - + "ame\022\334\001\n\027BatchCommitWriteStreams\022@.google" - + ".cloud.bigquery.storage.v1.BatchCommitWr" - + "iteStreamsRequest\032A.google.cloud.bigquer" - + "y.storage.v1.BatchCommitWriteStreamsResp" - + "onse\"<\202\323\344\223\002-\022+/v1/{parent=projects/*/dat" - + "asets/*/tables/*}\332A\006parent\022\313\001\n\tFlushRows" - + "\0222.google.cloud.bigquery.storage.v1.Flus" - + "hRowsRequest\0323.google.cloud.bigquery.sto" - + "rage.v1.FlushRowsResponse\"U\202\323\344\223\002@\";/v1/{" - + "write_stream=projects/*/datasets/*/table" - + "s/*/streams/*}:\001*\332A\014write_stream\032\260\001\312A\036bi" - + "gquerystorage.googleapis.com\322A\213\001https://" - + "www.googleapis.com/auth/bigquery,https:/" - + "/www.googleapis.com/auth/bigquery.insert" - + "data,https://www.googleapis.com/auth/clo" - + "ud-platformB\235\002\n$com.google.cloud.bigquer" - + "y.storage.v1B\014StorageProtoP\001ZGgoogle.gol" - + "ang.org/genproto/googleapis/cloud/bigque" - + "ry/storage/v1;storage\252\002 Google.Cloud.Big" - + "Query.Storage.V1\312\002 Google\\Cloud\\BigQuery" - + "\\Storage\\V1\352AU\n\035bigquery.googleapis.com/" - + "Table\0224projects/{project}/datasets/{data" - + "set}/tables/{table}b\006proto3" + + "eSchema\022>\n\nrow_errors\030\004 \003(\0132*.google.clo" + + "ud.bigquery.storage.v1.RowError\032;\n\014Appen" + + "dResult\022+\n\006offset\030\001 \001(\0132\033.google.protobu" + + "f.Int64ValueB\n\n\010response\"Y\n\025GetWriteStre" + + "amRequest\022@\n\004name\030\001 \001(\tB2\340A\002\372A,\n*bigquer" + + "ystorage.googleapis.com/WriteStream\"s\n\036B" + + "atchCommitWriteStreamsRequest\0225\n\006parent\030" + + "\001 \001(\tB%\340A\002\372A\037\n\035bigquery.googleapis.com/T" + + "able\022\032\n\rwrite_streams\030\002 \003(\tB\003\340A\002\"\231\001\n\037Bat" + + "chCommitWriteStreamsResponse\022/\n\013commit_t" + + "ime\030\001 \001(\0132\032.google.protobuf.Timestamp\022E\n" + + "\rstream_errors\030\002 \003(\0132..google.cloud.bigq" + + "uery.storage.v1.StorageError\"^\n\032Finalize" + + "WriteStreamRequest\022@\n\004name\030\001 \001(\tB2\340A\002\372A," + + "\n*bigquerystorage.googleapis.com/WriteSt" + + "ream\"0\n\033FinalizeWriteStreamResponse\022\021\n\tr" + + "ow_count\030\001 \001(\003\"\211\001\n\020FlushRowsRequest\022H\n\014w" + + "rite_stream\030\001 \001(\tB2\340A\002\372A,\n*bigquerystora" + + "ge.googleapis.com/WriteStream\022+\n\006offset\030" + + "\002 \001(\0132\033.google.protobuf.Int64Value\"#\n\021Fl" + + "ushRowsResponse\022\016\n\006offset\030\001 \001(\003\"\245\003\n\014Stor" + + "ageError\022M\n\004code\030\001 \001(\0162?.google.cloud.bi" + + "gquery.storage.v1.StorageError.StorageEr" + + "rorCode\022\016\n\006entity\030\002 \001(\t\022\025\n\rerror_message" + + "\030\003 \001(\t\"\236\002\n\020StorageErrorCode\022\"\n\036STORAGE_E" + + "RROR_CODE_UNSPECIFIED\020\000\022\023\n\017TABLE_NOT_FOU" + + "ND\020\001\022\034\n\030STREAM_ALREADY_COMMITTED\020\002\022\024\n\020ST" + + "REAM_NOT_FOUND\020\003\022\027\n\023INVALID_STREAM_TYPE\020" + + "\004\022\030\n\024INVALID_STREAM_STATE\020\005\022\024\n\020STREAM_FI" + + "NALIZED\020\006\022 \n\034SCHEMA_MISMATCH_EXTRA_FIELD" + + "S\020\007\022\031\n\025OFFSET_ALREADY_EXISTS\020\010\022\027\n\023OFFSET" + + "_OUT_OF_RANGE\020\t\"\263\001\n\010RowError\022\r\n\005index\030\001 " + + 
"\001(\003\022E\n\004code\030\002 \001(\01627.google.cloud.bigquer" + + "y.storage.v1.RowError.RowErrorCode\022\017\n\007me" + + "ssage\030\003 \001(\t\"@\n\014RowErrorCode\022\036\n\032ROW_ERROR" + + "_CODE_UNSPECIFIED\020\000\022\020\n\014FIELDS_ERROR\020\0012\222\006" + + "\n\014BigQueryRead\022\351\001\n\021CreateReadSession\022:.g" + + "oogle.cloud.bigquery.storage.v1.CreateRe" + + "adSessionRequest\032-.google.cloud.bigquery" + + ".storage.v1.ReadSession\"i\202\323\344\223\002<\"7/v1/{re" + + "ad_session.table=projects/*/datasets/*/t" + + "ables/*}:\001*\332A$parent,read_session,max_st" + + "ream_count\022\317\001\n\010ReadRows\0221.google.cloud.b" + + "igquery.storage.v1.ReadRowsRequest\0322.goo" + + "gle.cloud.bigquery.storage.v1.ReadRowsRe" + + "sponse\"Z\202\323\344\223\002?\022=/v1/{read_stream=project" + + "s/*/locations/*/sessions/*/streams/*}\332A\022" + + "read_stream,offset0\001\022\306\001\n\017SplitReadStream" + + "\0228.google.cloud.bigquery.storage.v1.Spli" + + "tReadStreamRequest\0329.google.cloud.bigque" + + "ry.storage.v1.SplitReadStreamResponse\">\202" + + "\323\344\223\0028\0226/v1/{name=projects/*/locations/*/" + + "sessions/*/streams/*}\032{\312A\036bigquerystorag" + + "e.googleapis.com\322AWhttps://www.googleapi" + + "s.com/auth/bigquery,https://www.googleap" + + "is.com/auth/cloud-platform2\274\013\n\rBigQueryW" + + "rite\022\327\001\n\021CreateWriteStream\022:.google.clou" + + "d.bigquery.storage.v1.CreateWriteStreamR" + + "equest\032-.google.cloud.bigquery.storage.v" + + "1.WriteStream\"W\202\323\344\223\002;\"+/v1/{parent=proje" + + "cts/*/datasets/*/tables/*}:\014write_stream" + + "\332A\023parent,write_stream\022\322\001\n\nAppendRows\0223." + + "google.cloud.bigquery.storage.v1.AppendR" + + "owsRequest\0324.google.cloud.bigquery.stora" + + "ge.v1.AppendRowsResponse\"U\202\323\344\223\002@\";/v1/{w" + + "rite_stream=projects/*/datasets/*/tables" + + "/*/streams/*}:\001*\332A\014write_stream(\0010\001\022\277\001\n\016" + + "GetWriteStream\0227.google.cloud.bigquery.s" + + "torage.v1.GetWriteStreamRequest\032-.google" + + ".cloud.bigquery.storage.v1.WriteStream\"E" + + "\202\323\344\223\0028\"3/v1/{name=projects/*/datasets/*/" + + "tables/*/streams/*}:\001*\332A\004name\022\331\001\n\023Finali" + + "zeWriteStream\022<.google.cloud.bigquery.st" + + "orage.v1.FinalizeWriteStreamRequest\032=.go" + + "ogle.cloud.bigquery.storage.v1.FinalizeW" + + "riteStreamResponse\"E\202\323\344\223\0028\"3/v1/{name=pr" + + "ojects/*/datasets/*/tables/*/streams/*}:" + + "\001*\332A\004name\022\334\001\n\027BatchCommitWriteStreams\022@." + + "google.cloud.bigquery.storage.v1.BatchCo" + + "mmitWriteStreamsRequest\032A.google.cloud.b" + + "igquery.storage.v1.BatchCommitWriteStrea" + + "msResponse\"<\202\323\344\223\002-\022+/v1/{parent=projects" + + "/*/datasets/*/tables/*}\332A\006parent\022\313\001\n\tFlu" + + "shRows\0222.google.cloud.bigquery.storage.v" + + "1.FlushRowsRequest\0323.google.cloud.bigque" + + "ry.storage.v1.FlushRowsResponse\"U\202\323\344\223\002@\"" + + ";/v1/{write_stream=projects/*/datasets/*" + + "/tables/*/streams/*}:\001*\332A\014write_stream\032\260" + + "\001\312A\036bigquerystorage.googleapis.com\322A\213\001ht" + + "tps://www.googleapis.com/auth/bigquery,h" + + "ttps://www.googleapis.com/auth/bigquery." 
+ + "insertdata,https://www.googleapis.com/au" + + "th/cloud-platformB\235\002\n$com.google.cloud.b" + + "igquery.storage.v1B\014StorageProtoP\001ZGgoog" + + "le.golang.org/genproto/googleapis/cloud/" + + "bigquery/storage/v1;storage\252\002 Google.Clo" + + "ud.BigQuery.Storage.V1\312\002 Google\\Cloud\\Bi" + + "gQuery\\Storage\\V1\352AU\n\035bigquery.googleapi" + + "s.com/Table\0224projects/{project}/datasets" + + "/{dataset}/tables/{table}b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -402,7 +412,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_descriptor, new java.lang.String[] { - "AppendResult", "Error", "UpdatedSchema", "Response", + "AppendResult", "Error", "UpdatedSchema", "RowErrors", "Response", }); internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_AppendResult_descriptor = internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_descriptor @@ -478,6 +488,14 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new java.lang.String[] { "Code", "Entity", "ErrorMessage", }); + internal_static_google_cloud_bigquery_storage_v1_RowError_descriptor = + getDescriptor().getMessageTypes().get(18); + internal_static_google_cloud_bigquery_storage_v1_RowError_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1_RowError_descriptor, + new java.lang.String[] { + "Index", "Code", "Message", + }); com.google.protobuf.ExtensionRegistry registry = com.google.protobuf.ExtensionRegistry.newInstance(); registry.add(com.google.api.ClientProto.defaultHost); diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchema.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchema.java index 01bf6f843f..9ad403d8b1 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchema.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableSchema.java @@ -22,7 +22,9 @@ * * *
- * Schema of a table.
+ * Schema of a table. This schema is a subset of
+ * google.cloud.bigquery.v2.TableSchema containing information necessary to
+ * generate valid messages to write to BigQuery.
  * 
* * Protobuf type {@code google.cloud.bigquery.storage.v1.TableSchema} @@ -359,7 +361,9 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
-   * Schema of a table.
+   * Schema of a table. This schema is a subset of
+   * google.cloud.bigquery.v2.TableSchema containing information necessary to
+   * generate valid messages to write to BigQuery.
    * 
* * Protobuf type {@code google.cloud.bigquery.storage.v1.TableSchema} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto index 67c6c8a029..f3c974c646 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto +++ b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto @@ -248,11 +248,13 @@ message CreateReadSessionRequest { // Max initial number of streams. If unset or zero, the server will // provide a value of streams so as to produce reasonable throughput. Must be // non-negative. The number of streams may be lower than the requested number, - // depending on the amount parallelism that is reasonable for the table. Error - // will be returned if the max count is greater than the current system - // max limit of 1,000. + // depending on the amount of parallelism that is reasonable for the table. + // There is a default system max limit of 1,000. // - // Streams must be read starting from offset 0. + // This must be greater than or equal to preferred_min_stream_count. + // Typically, clients should either leave this unset to let the system + // determine an upper bound OR set this to a size for the maximum "units of + // work" it can gracefully handle. int32 max_stream_count = 3; } @@ -329,7 +331,7 @@ message ReadRowsResponse { // The schema for the read. If read_options.selected_fields is set, the // schema may be different from the table schema as it will only contain - // the selected fields. This schema is equivelant to the one returned by + // the selected fields. This schema is equivalent to the one returned by // CreateSession. This field is only populated in the first ReadRowsResponse // RPC. oneof schema { @@ -488,6 +490,11 @@ message AppendRowsResponse { // use it to input new type of message. It will be empty when no schema // updates have occurred. TableSchema updated_schema = 3; + + // If a request fails due to corrupted rows, no rows in the batch will be + // appended. The API will return row-level error info so that the caller can + // remove the bad rows and retry the request. + repeated RowError row_errors = 4; } // Request message for `GetWriteStreamRequest`. @@ -622,3 +629,24 @@ message StorageError { // Message that describes the error. string error_message = 3; } + +// The message that presents row-level error info in a request. +message RowError { + // Error code for `RowError`. + enum RowErrorCode { + // Default error. + ROW_ERROR_CODE_UNSPECIFIED = 0; + + // One or more fields in the row have errors. + FIELDS_ERROR = 1; + } + + // Index of the malformed row in the request. + int64 index = 1; + + // Structured error reason for a row error. + RowErrorCode code = 2; + + // Description of the issue encountered when processing the row. 
+ string message = 3; +} diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto index bd1fa2ce98..fd1e25b65f 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto +++ b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto @@ -32,6 +32,7 @@ option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1"; // Data format for input or output data. enum DataFormat { + // Data format is unspecified. DATA_FORMAT_UNSPECIFIED = 0; // Avro is a standard open source row based file format. @@ -91,7 +92,7 @@ message ReadSession { // automatically assigned and currently cannot be specified or updated. google.protobuf.Timestamp expire_time = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Immutable. Data format of the output data. + // Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED is not supported. DataFormat data_format = 3 [(google.api.field_behavior) = IMMUTABLE]; // The schema for the read. If read_options.selected_fields is set, the diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/table.proto b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/table.proto index 545f629271..fa4f840c58 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/table.proto +++ b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/table.proto @@ -25,7 +25,9 @@ option java_outer_classname = "TableProto"; option java_package = "com.google.cloud.bigquery.storage.v1"; option php_namespace = "Google\\Cloud\\BigQuery\\Storage\\V1"; -// Schema of a table. +// Schema of a table. This schema is a subset of +// google.cloud.bigquery.v2.TableSchema containing information necessary to +// generate valid messages to write to BigQuery. message TableSchema { // Describes the fields in a table. repeated TableFieldSchema fields = 1;
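
For context on consuming the new field, below is a minimal sketch of how a caller might use the row_errors list that this patch adds to AppendRowsResponse. Only the accessors generated by this change (AppendRowsResponse.getRowErrorsList(), RowError.getIndex(), RowError.getCode(), RowError.getMessage()) come from the patch itself; the class name, helper name, generic row type, and any retry wiring around it are hypothetical.

import com.google.cloud.bigquery.storage.v1.AppendRowsResponse;
import com.google.cloud.bigquery.storage.v1.RowError;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public final class RowErrorHandlingSketch {

  // Returns the rows the response did not flag, so the caller can retry the
  // append without the malformed rows. The element type is left generic; in a
  // real writer these would be the serialized proto rows from the request.
  static <T> List<T> dropBadRows(List<T> requestRows, AppendRowsResponse response) {
    Set<Long> badIndexes = new HashSet<>();
    for (RowError error : response.getRowErrorsList()) {
      // getIndex() is the position of the malformed row within the request.
      badIndexes.add(error.getIndex());
      System.err.printf("row %d failed with %s: %s%n",
          error.getIndex(), error.getCode(), error.getMessage());
    }
    List<T> retryable = new ArrayList<>();
    for (int i = 0; i < requestRows.size(); i++) {
      if (!badIndexes.contains((long) i)) {
        retryable.add(requestRows.get(i));
      }
    }
    return retryable;
  }
}

Because the new field comment specifies that no rows in a batch are appended when a request fails due to corrupted rows, dropping the flagged indexes and re-sending the remainder is consistent with the documented contract.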